/*
 * Copyright (C) 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <gtest/gtest.h>

// The real <stdatomic.h> checks for the availability of C++'s <atomic> and
// uses that instead if present.
// We want to test the C interfaces, so we include <bits/stdatomic.h> directly instead.
// This doesn't entirely work, because gtest also (transitively) pulls in <atomic>.
// It's not clear there's a good fix for this,
// other than switching to a non-C++ unit test framework for bionic.
// Bionic has <stdatomic.h>, which includes <bits/stdatomic.h>, but GCC and
// Clang only provide <stdatomic.h>, so only include <bits/stdatomic.h> when it
// exists. That is, include <bits/stdatomic.h> for bionic but not for glibc.
#if __has_include(<bits/stdatomic.h>)
#include <bits/stdatomic.h>
#else
#include <stdatomic.h>
#endif

#include <pthread.h>
#include <stdint.h>
#include <unistd.h>  // For usleep(), used in the ordering test below.

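// Note: the C11 *_LOCK_FREE macros expand to 0 (never lock-free), 1
// (sometimes lock-free), or 2 (always lock-free), so these assertions only
// check that each type is at least sometimes lock-free.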
TEST(stdatomic, LOCK_FREE) {
  ASSERT_TRUE(ATOMIC_BOOL_LOCK_FREE);
  ASSERT_TRUE(ATOMIC_CHAR16_T_LOCK_FREE);
  ASSERT_TRUE(ATOMIC_CHAR32_T_LOCK_FREE);
  ASSERT_TRUE(ATOMIC_CHAR_LOCK_FREE);
  ASSERT_TRUE(ATOMIC_INT_LOCK_FREE);
  ASSERT_TRUE(ATOMIC_LLONG_LOCK_FREE);
  ASSERT_TRUE(ATOMIC_LONG_LOCK_FREE);
  ASSERT_TRUE(ATOMIC_POINTER_LOCK_FREE);
  ASSERT_TRUE(ATOMIC_SHORT_LOCK_FREE);
  ASSERT_TRUE(ATOMIC_WCHAR_T_LOCK_FREE);
}

TEST(stdatomic, init) {
  // ATOMIC_VAR_INIT has been removed from C23,
  // but is still in POSIX 2024.
  // Even if it is removed from there,
  // we should probably keep it indefinitely for source compatibility.
  // libc++'s <atomic> (which we can't entirely avoid: see above)
  // marks the macro deprecated,
  // so we need to silence that.
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wdeprecated-pragma"
  atomic_int v = ATOMIC_VAR_INIT(123);
  ASSERT_EQ(123, atomic_load(&v));
#pragma clang diagnostic pop

  atomic_store_explicit(&v, 456, memory_order_relaxed);
  ASSERT_EQ(456, atomic_load(&v));

  atomic_flag f = ATOMIC_FLAG_INIT;
  ASSERT_FALSE(atomic_flag_test_and_set(&f));
}

TEST(stdatomic, atomic_thread_fence) {
  atomic_thread_fence(memory_order_relaxed);
  atomic_thread_fence(memory_order_consume);
  atomic_thread_fence(memory_order_acquire);
  atomic_thread_fence(memory_order_release);
  atomic_thread_fence(memory_order_acq_rel);
  atomic_thread_fence(memory_order_seq_cst);
}

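// Unlike atomic_thread_fence, atomic_signal_fence only constrains
// reordering between a thread and a signal handler running in that same
// thread, so it typically compiles down to a compiler barrier with no
// hardware fence instructions.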
TEST(stdatomic, atomic_signal_fence) {
  atomic_signal_fence(memory_order_relaxed);
  atomic_signal_fence(memory_order_consume);
  atomic_signal_fence(memory_order_acquire);
  atomic_signal_fence(memory_order_release);
  atomic_signal_fence(memory_order_acq_rel);
  atomic_signal_fence(memory_order_seq_cst);
}

TEST(stdatomic, atomic_is_lock_free) {
  atomic_char small;
  ASSERT_TRUE(atomic_is_lock_free(&small));
  atomic_intmax_t big;
  ASSERT_TRUE(atomic_is_lock_free(&big));
}

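// atomic_flag is the only atomic type the standard guarantees to be
// lock-free on every implementation.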
TEST(stdatomic, atomic_flag) {
  atomic_flag f = ATOMIC_FLAG_INIT;
  ASSERT_FALSE(atomic_flag_test_and_set(&f));
  ASSERT_TRUE(atomic_flag_test_and_set(&f));

  atomic_flag_clear(&f);

  ASSERT_FALSE(atomic_flag_test_and_set_explicit(&f, memory_order_relaxed));
  ASSERT_TRUE(atomic_flag_test_and_set_explicit(&f, memory_order_relaxed));

  atomic_flag_clear_explicit(&f, memory_order_relaxed);
  ASSERT_FALSE(atomic_flag_test_and_set_explicit(&f, memory_order_relaxed));
}

TEST(stdatomic, atomic_store) {
  atomic_int i;
  atomic_store(&i, 123);
  ASSERT_EQ(123, atomic_load(&i));
  atomic_store_explicit(&i, 123, memory_order_relaxed);
  ASSERT_EQ(123, atomic_load_explicit(&i, memory_order_relaxed));
}

TEST(stdatomic, atomic_exchange) {
  atomic_int i;
  atomic_store(&i, 123);
  ASSERT_EQ(123, atomic_exchange(&i, 456));
  ASSERT_EQ(456, atomic_exchange_explicit(&i, 123, memory_order_relaxed));
}

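// On failure, the compare_exchange family writes the value it actually saw
// back into *expected; that's why `expected` ends up as 456 after each
// failed exchange below. The _weak variants are additionally allowed to
// fail spuriously, hence the bounded retry loops.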
TEST(stdatomic, atomic_compare_exchange) {
  atomic_int i;
  int expected;

  atomic_store(&i, 123);
  expected = 123;
  ASSERT_TRUE(atomic_compare_exchange_strong(&i, &expected, 456));
  ASSERT_FALSE(atomic_compare_exchange_strong(&i, &expected, 456));
  ASSERT_EQ(456, expected);

  atomic_store(&i, 123);
  expected = 123;
  ASSERT_TRUE(atomic_compare_exchange_strong_explicit(&i, &expected, 456, memory_order_relaxed,
                                                      memory_order_relaxed));
  ASSERT_FALSE(atomic_compare_exchange_strong_explicit(&i, &expected, 456, memory_order_relaxed,
                                                       memory_order_relaxed));
  ASSERT_EQ(456, expected);

  atomic_store(&i, 123);
  expected = 123;
  int iter_count = 0;
  do {
    ++iter_count;
    ASSERT_LT(iter_count, 100);  // Arbitrary limit on spurious compare_exchange failures.
    ASSERT_EQ(expected, 123);
  } while (!atomic_compare_exchange_weak(&i, &expected, 456));
  ASSERT_FALSE(atomic_compare_exchange_weak(&i, &expected, 456));
  ASSERT_EQ(456, expected);

  atomic_store(&i, 123);
  expected = 123;
  iter_count = 0;
  do {
    ++iter_count;
    ASSERT_LT(iter_count, 100);
    ASSERT_EQ(expected, 123);
  } while (!atomic_compare_exchange_weak_explicit(&i, &expected, 456, memory_order_relaxed,
                                                  memory_order_relaxed));
  ASSERT_FALSE(atomic_compare_exchange_weak_explicit(&i, &expected, 456, memory_order_relaxed,
                                                     memory_order_relaxed));
  ASSERT_EQ(456, expected);
}

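// The atomic_fetch_* operations all return the value the object held
// immediately before the modification, which is what the first two
// ASSERT_EQs in each test below are checking.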
TEST(stdatomic, atomic_fetch_add) {
  atomic_int i = 123;
  ASSERT_EQ(123, atomic_fetch_add(&i, 1));
  ASSERT_EQ(124, atomic_fetch_add_explicit(&i, 1, memory_order_relaxed));
  ASSERT_EQ(125, atomic_load(&i));
}

TEST(stdatomic, atomic_fetch_sub) {
  atomic_int i = 123;
  ASSERT_EQ(123, atomic_fetch_sub(&i, 1));
  ASSERT_EQ(122, atomic_fetch_sub_explicit(&i, 1, memory_order_relaxed));
  ASSERT_EQ(121, atomic_load(&i));
}

TEST(stdatomic, atomic_fetch_or) {
  atomic_int i = 0x100;
  ASSERT_EQ(0x100, atomic_fetch_or(&i, 0x020));
  ASSERT_EQ(0x120, atomic_fetch_or_explicit(&i, 0x003, memory_order_relaxed));
  ASSERT_EQ(0x123, atomic_load(&i));
}

TEST(stdatomic, atomic_fetch_xor) {
  atomic_int i = 0x100;
  ASSERT_EQ(0x100, atomic_fetch_xor(&i, 0x120));
  ASSERT_EQ(0x020, atomic_fetch_xor_explicit(&i, 0x103, memory_order_relaxed));
  ASSERT_EQ(0x123, atomic_load(&i));
}

TEST(stdatomic, atomic_fetch_and) {
  atomic_int i = 0x123;
  ASSERT_EQ(0x123, atomic_fetch_and(&i, 0x00f));
  ASSERT_EQ(0x003, atomic_fetch_and_explicit(&i, 0x2, memory_order_relaxed));
  ASSERT_EQ(0x002, atomic_load(&i));
}

// And a rudimentary test of acquire-release memory ordering:

static constexpr uint_least32_t BIG = 30'000'000ul;
static_assert((BIG % 2) == 0);  // Assumed below.

struct three_atomics {
  atomic_uint_least32_t x;
  char a[123];  // Padding puts everything in different cache lines, to
                // increase the chance of the compiler getting the alignment wrong.
  atomic_uint_least32_t y;
  char b[4013];
  atomic_uint_least32_t z;
};

// Set by the reader once it has run for repeat_limit iterations; tells the
// writer it no longer needs to pause for the reader to catch up.
atomic_bool read_enough(false);

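// The writer publishes each new value of y with a release store, after
// making the matching relaxed stores to x and z. If the reader's acquire
// load observes y == v, it must therefore also observe the stores to x and
// z that preceded that release store, i.e. x >= v and z >= v.
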
// Very simple acquire/release memory ordering smoke test.
static void* writer(void* arg) {
  three_atomics* a = reinterpret_cast<three_atomics*>(arg);
  for (uint_least32_t i = 0; i <= BIG; i += 2) {
    atomic_store_explicit(&a->x, i, memory_order_relaxed);
    atomic_store_explicit(&a->z, i, memory_order_relaxed);
    atomic_store_explicit(&a->y, i, memory_order_release);

    // Force stores to be visible in spite of being overwritten below.
    asm volatile("" ::: "memory");

    atomic_store_explicit(&a->x, i + 1, memory_order_relaxed);
    atomic_store_explicit(&a->z, i + 1, memory_order_relaxed);
    atomic_store_explicit(&a->y, i + 1, memory_order_release);
    if (i >= BIG - 1000 && !atomic_load(&read_enough)) {
      // Give the reader a chance to catch up, at the expense of making the
      // test less effective.
      usleep(1000);
    }
  }
  return nullptr;
}

static void* reader(void* arg) {
  three_atomics* a = reinterpret_cast<three_atomics*>(arg);
  uint_least32_t xval = 0, yval = 0, zval = 0;
  size_t repeat = 0;
  size_t repeat_limit = 1000;
  while (yval != BIG + 1) {
    yval = atomic_load_explicit(&a->y, memory_order_acquire);
    zval = atomic_load_explicit(&a->z, memory_order_relaxed);
    xval = atomic_load_explicit(&a->x, memory_order_relaxed);
    // If we see a given value of y, the immediately preceding
    // stores to z and x, or later ones, should also be visible.
    if (zval < yval) {
      // Can't just ASSERT, since we are in a non-void function.
      ADD_FAILURE() << "acquire-release ordering violation: "
                    << zval << " < " << yval << ", " << xval << "\n";
      return nullptr;  // Only report once.
    }
    if (xval < yval) {
      // Can't just ASSERT, since we are in a non-void function.
      ADD_FAILURE() << "acquire-release ordering violation: "
                    << xval << " < " << yval << ", " << zval << "\n";
      return nullptr;  // Only report once.
    }
    if (repeat < repeat_limit) {
      ++repeat;
    } else if (!atomic_load_explicit(&read_enough, memory_order_relaxed)) {
      atomic_store_explicit(&read_enough, true, memory_order_relaxed);
    }
  }
  // The following assertion is not technically guaranteed to hold.
  // But if it fails to hold, this test was useless, and we have a
  // serious scheduling issue that we should probably know about.
  EXPECT_EQ(repeat, repeat_limit);
  return nullptr;
}

279TEST(stdatomic, ordering) {
Elliott Hughes68ae6ad2020-07-21 16:11:30 -0700280 // Run a memory ordering smoke test.
Hans Boehm00aaea32014-08-19 16:14:01 -0700281 void* result;
282 three_atomics a;
Nick Desaulniers2e65afe2024-11-19 09:27:06 -0800283 atomic_store_explicit(&a.x, 0ul, memory_order_relaxed);
284 atomic_store_explicit(&a.y, 0ul, memory_order_relaxed);
285 atomic_store_explicit(&a.z, 0ul, memory_order_relaxed);
Hans Boehm00aaea32014-08-19 16:14:01 -0700286 pthread_t t1,t2;
Yi Kong32bc0fc2018-08-02 17:31:13 -0700287 ASSERT_EQ(0, pthread_create(&t1, nullptr, reader, &a));
288 ASSERT_EQ(0, pthread_create(&t2, nullptr, writer, &a));
Hans Boehm00aaea32014-08-19 16:14:01 -0700289 ASSERT_EQ(0, pthread_join(t1, &result));
Yi Kong32bc0fc2018-08-02 17:31:13 -0700290 EXPECT_EQ(nullptr, result);
Hans Boehm00aaea32014-08-19 16:14:01 -0700291 ASSERT_EQ(0, pthread_join(t2, &result));
Yi Kong32bc0fc2018-08-02 17:31:13 -0700292 EXPECT_EQ(nullptr, result);
Hans Boehm00aaea32014-08-19 16:14:01 -0700293 EXPECT_EQ(atomic_load_explicit(&a.x, memory_order_consume), BIG + 1);
294 EXPECT_EQ(atomic_load_explicit(&a.y, memory_order_seq_cst), BIG + 1);
295 EXPECT_EQ(atomic_load(&a.z), BIG + 1);
296}