/*
 * Copyright (C) 2012 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <gtest/gtest.h>

#include <errno.h>
#include <inttypes.h>
#include <limits.h>
#include <malloc.h>
#include <pthread.h>
#include <signal.h>
#include <stdio.h>
#include <sys/cdefs.h>
#include <sys/mman.h>
#include <sys/param.h>
#include <sys/prctl.h>
#include <sys/resource.h>
#include <sys/syscall.h>
#include <time.h>
#include <unistd.h>
#include <unwind.h>

#include <atomic>
#include <functional>
#include <future>
#include <thread>
#include <vector>

#include <android-base/macros.h>
#include <android-base/parseint.h>
#include <android-base/scopeguard.h>
#include <android-base/silent_death_test.h>
#include <android-base/strings.h>
#include <android-base/test_utils.h>

#include "private/bionic_constants.h"
#include "private/bionic_time_conversions.h"
#include "SignalUtils.h"
#include "sme_utils.h"
#include "utils.h"

using pthread_DeathTest = SilentDeathTest;

TEST(pthread, pthread_key_create) {
  pthread_key_t key;
  ASSERT_EQ(0, pthread_key_create(&key, nullptr));
  ASSERT_EQ(0, pthread_key_delete(key));
  // Can't delete a key that's already been deleted.
  ASSERT_EQ(EINVAL, pthread_key_delete(key));
}

static std::vector<void*> example_key_destructor_data;
static pthread_key_t example_key;
static void example_key_destructor(void* data) {
  // By the time the destructor function is running,
  // this thread's value for the key should have been zeroed.
  ASSERT_EQ(NULL, pthread_getspecific(example_key));

  // Store the value so we can check we got the expected result.
  example_key_destructor_data.push_back(data);
}

TEST(pthread, pthread_key_destructors) {
  ASSERT_EQ(0, pthread_key_create(&example_key, example_key_destructor));

  // Check that the destructor isn't called for a default null value.
  std::thread([]() {}).join();
  ASSERT_TRUE(example_key_destructor_data.empty());

  // Check that the destructor isn't called for an explicit null value.
  std::thread([]() {
    ASSERT_EQ(0, pthread_setspecific(example_key, (void*) 1234));
    ASSERT_EQ(0, pthread_setspecific(example_key, nullptr));
  }).join();
  ASSERT_TRUE(example_key_destructor_data.empty());

  // Check that the destructor is called for a non-null value.
  std::thread([]() { ASSERT_EQ(0, pthread_setspecific(example_key, (void*) 1234)); }).join();
  ASSERT_EQ(1u, example_key_destructor_data.size());
  ASSERT_EQ((void*) 1234, example_key_destructor_data[0]);

  ASSERT_EQ(0, pthread_key_delete(example_key));
}
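
// A minimal sketch (illustration only, not exercised by these tests) of the
// usual idiom for creating a process-wide key exactly once and attaching a
// heap-allocated value that the destructor frees:
//
//   static pthread_key_t g_key;
//   static pthread_once_t g_once = PTHREAD_ONCE_INIT;
//   static void CreateKey() { pthread_key_create(&g_key, free); }
//   ...
//   pthread_once(&g_once, CreateKey);
//   pthread_setspecific(g_key, strdup("per-thread value"));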

TEST(pthread, pthread_keys_max) {
  // POSIX says PTHREAD_KEYS_MAX should be at least _POSIX_THREAD_KEYS_MAX.
  ASSERT_GE(PTHREAD_KEYS_MAX, _POSIX_THREAD_KEYS_MAX);
}

TEST(pthread, sysconf_SC_THREAD_KEYS_MAX_eq_PTHREAD_KEYS_MAX) {
  int sysconf_max = sysconf(_SC_THREAD_KEYS_MAX);
  ASSERT_EQ(sysconf_max, PTHREAD_KEYS_MAX);
}

TEST(pthread, pthread_key_many_distinct) {
  // As gtest uses pthread keys, we can't allocate exactly PTHREAD_KEYS_MAX
  // pthread keys, but we should be able to allocate at least this many.
  int nkeys = PTHREAD_KEYS_MAX / 2;
  std::vector<pthread_key_t> keys;

  auto scope_guard = android::base::make_scope_guard([&keys] {
    for (const auto& key : keys) {
      EXPECT_EQ(0, pthread_key_delete(key));
    }
  });

  for (int i = 0; i < nkeys; ++i) {
    pthread_key_t key;
    // If this fails, it's likely that LIBC_PTHREAD_KEY_RESERVED_COUNT is wrong.
    ASSERT_EQ(0, pthread_key_create(&key, nullptr)) << i << " of " << nkeys;
    keys.push_back(key);
    ASSERT_EQ(0, pthread_setspecific(key, reinterpret_cast<void*>(i)));
  }

  for (int i = keys.size() - 1; i >= 0; --i) {
    ASSERT_EQ(reinterpret_cast<void*>(i), pthread_getspecific(keys.back()));
    pthread_key_t key = keys.back();
    keys.pop_back();
    ASSERT_EQ(0, pthread_key_delete(key));
  }
}

TEST(pthread, pthread_key_not_exceed_PTHREAD_KEYS_MAX) {
  std::vector<pthread_key_t> keys;
  int rv = 0;

  // Pthread keys are used by gtest, so PTHREAD_KEYS_MAX should
  // be more than we are allowed to allocate now.
  for (int i = 0; i < PTHREAD_KEYS_MAX; i++) {
    pthread_key_t key;
    rv = pthread_key_create(&key, nullptr);
    if (rv == EAGAIN) {
      break;
    }
    EXPECT_EQ(0, rv);
    keys.push_back(key);
  }

  // Don't leak keys.
  for (const auto& key : keys) {
    EXPECT_EQ(0, pthread_key_delete(key));
  }
  keys.clear();

  // We should have eventually reached the maximum number of keys and received
  // EAGAIN.
  ASSERT_EQ(EAGAIN, rv);
}

TEST(pthread, pthread_key_delete) {
  void* expected = reinterpret_cast<void*>(1234);
  pthread_key_t key;
  ASSERT_EQ(0, pthread_key_create(&key, nullptr));
  ASSERT_EQ(0, pthread_setspecific(key, expected));
  ASSERT_EQ(expected, pthread_getspecific(key));
  ASSERT_EQ(0, pthread_key_delete(key));
  // After deletion, pthread_getspecific returns nullptr.
  ASSERT_EQ(nullptr, pthread_getspecific(key));
  // And you can't use pthread_setspecific with the deleted key.
  ASSERT_EQ(EINVAL, pthread_setspecific(key, expected));
}

TEST(pthread, pthread_key_fork) {
  void* expected = reinterpret_cast<void*>(1234);
  pthread_key_t key;
  ASSERT_EQ(0, pthread_key_create(&key, nullptr));
  ASSERT_EQ(0, pthread_setspecific(key, expected));
  ASSERT_EQ(expected, pthread_getspecific(key));

  pid_t pid = fork();
  ASSERT_NE(-1, pid) << strerror(errno);

  if (pid == 0) {
    // The surviving thread inherits all the forking thread's TLS values...
    ASSERT_EQ(expected, pthread_getspecific(key));
    _exit(99);
  }

  AssertChildExited(pid, 99);

  ASSERT_EQ(expected, pthread_getspecific(key));
  ASSERT_EQ(0, pthread_key_delete(key));
}

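// Reads back a key that was never set on the new thread: the result must be
// null even though the thread's freshly-mapped stack was filled with 0xff
// beforehand (see pthread_key_dirty below).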
static void* DirtyKeyFn(void* key) {
  return pthread_getspecific(*reinterpret_cast<pthread_key_t*>(key));
}

TEST(pthread, pthread_key_dirty) {
  pthread_key_t key;
  ASSERT_EQ(0, pthread_key_create(&key, nullptr));

  size_t stack_size = 640 * 1024;
  void* stack = mmap(nullptr, stack_size, PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
  ASSERT_NE(MAP_FAILED, stack);
  memset(stack, 0xff, stack_size);

  pthread_attr_t attr;
  ASSERT_EQ(0, pthread_attr_init(&attr));
  ASSERT_EQ(0, pthread_attr_setstack(&attr, stack, stack_size));

  pthread_t t;
  ASSERT_EQ(0, pthread_create(&t, &attr, DirtyKeyFn, &key));

  void* result;
  ASSERT_EQ(0, pthread_join(t, &result));
  ASSERT_EQ(nullptr, result); // Not ~0!

  ASSERT_EQ(0, munmap(stack, stack_size));
  ASSERT_EQ(0, pthread_key_delete(key));
}

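// Writes to a local through a volatile pointer so the store can't be
// optimized away, guaranteeing the thread actually touches its stack.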
static void* FnWithStackFrame(void*) {
  int x;
  *const_cast<volatile int*>(&x) = 1;
  return nullptr;
}

TEST(pthread, pthread_heap_allocated_stack) {
  SKIP_WITH_HWASAN; // TODO(b/148982147): Re-enable when fixed.

  size_t stack_size = 640 * 1024;
  std::unique_ptr<char[]> stack(new (std::align_val_t(getpagesize())) char[stack_size]);
  memset(stack.get(), '\xff', stack_size);

  pthread_attr_t attr;
  ASSERT_EQ(0, pthread_attr_init(&attr));
  ASSERT_EQ(0, pthread_attr_setstack(&attr, stack.get(), stack_size));

  pthread_t t;
  ASSERT_EQ(0, pthread_create(&t, &attr, FnWithStackFrame, nullptr));

  void* result;
  ASSERT_EQ(0, pthread_join(t, &result));
}

TEST(pthread, static_pthread_key_used_before_creation) {
#if defined(__BIONIC__)
  // See http://b/19625804. The bug was a static/global pthread key being used before creation.
  // So this checks that the static/global default value 0 is detected as an invalid key.
  static pthread_key_t key;
  ASSERT_EQ(nullptr, pthread_getspecific(key));
  ASSERT_EQ(EINVAL, pthread_setspecific(key, nullptr));
  ASSERT_EQ(EINVAL, pthread_key_delete(key));
#else
  GTEST_SKIP() << "bionic-only test";
#endif
}

static void* IdFn(void* arg) {
  return arg;
}

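// RAII helper whose GetFunction() returns a thread function that spins until
// UnSpin() is called (or the helper is destroyed), keeping a test thread
// alive for exactly as long as a test needs it.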
class SpinFunctionHelper {
 public:
  SpinFunctionHelper() {
    SpinFunctionHelper::spin_flag_ = true;
  }

  ~SpinFunctionHelper() {
    UnSpin();
  }

  auto GetFunction() -> void* (*)(void*) {
    return SpinFunctionHelper::SpinFn;
  }

  void UnSpin() {
    SpinFunctionHelper::spin_flag_ = false;
  }

 private:
  static void* SpinFn(void*) {
    while (spin_flag_) {}
    return nullptr;
  }
  static std::atomic<bool> spin_flag_;
};

// It doesn't matter that spin_flag_ is shared between tests, because it is
// always set to false at the end of each test. Any thread looping on
// spin_flag_ will see it become false at some point.
std::atomic<bool> SpinFunctionHelper::spin_flag_;

static void* JoinFn(void* arg) {
  return reinterpret_cast<void*>(pthread_join(reinterpret_cast<pthread_t>(arg), nullptr));
}

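// Asserts, via the detach state reported by pthread_getattr_np, that thread
// t is (or is not) detached.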
static void AssertDetached(pthread_t t, bool is_detached) {
  pthread_attr_t attr;
  ASSERT_EQ(0, pthread_getattr_np(t, &attr));
  int detach_state;
  ASSERT_EQ(0, pthread_attr_getdetachstate(&attr, &detach_state));
  pthread_attr_destroy(&attr);
  ASSERT_EQ(is_detached, (detach_state == PTHREAD_CREATE_DETACHED));
}

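// Creates a thread that has already exited and been joined, so the resulting
// pthread_t refers to a thread that no longer exists. Used by the
// "no such thread" death tests below.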
static void MakeDeadThread(pthread_t& t) {
  ASSERT_EQ(0, pthread_create(&t, nullptr, IdFn, nullptr));
  ASSERT_EQ(0, pthread_join(t, nullptr));
}

TEST(pthread, pthread_create) {
  void* expected_result = reinterpret_cast<void*>(123);
  // Can we create a thread?
  pthread_t t;
  ASSERT_EQ(0, pthread_create(&t, nullptr, IdFn, expected_result));
  // If we join, do we get the expected value back?
  void* result;
  ASSERT_EQ(0, pthread_join(t, &result));
  ASSERT_EQ(expected_result, result);
}

TEST(pthread, pthread_create_EAGAIN) {
  pthread_attr_t attributes;
  ASSERT_EQ(0, pthread_attr_init(&attributes));
  ASSERT_EQ(0, pthread_attr_setstacksize(&attributes, static_cast<size_t>(-1) & ~(getpagesize() - 1)));

  pthread_t t;
  ASSERT_EQ(EAGAIN, pthread_create(&t, &attributes, IdFn, nullptr));
}

TEST(pthread, pthread_no_join_after_detach) {
  SpinFunctionHelper spin_helper;

  pthread_t t1;
  ASSERT_EQ(0, pthread_create(&t1, nullptr, spin_helper.GetFunction(), nullptr));

  // After a pthread_detach...
  ASSERT_EQ(0, pthread_detach(t1));
  AssertDetached(t1, true);

  // ...pthread_join should fail.
  ASSERT_EQ(EINVAL, pthread_join(t1, nullptr));
}

TEST(pthread, pthread_no_op_detach_after_join) {
  SpinFunctionHelper spin_helper;

  pthread_t t1;
  ASSERT_EQ(0, pthread_create(&t1, nullptr, spin_helper.GetFunction(), nullptr));

  // If thread 2 is already waiting to join thread 1...
  pthread_t t2;
  ASSERT_EQ(0, pthread_create(&t2, nullptr, JoinFn, reinterpret_cast<void*>(t1)));

  sleep(1); // (Give t2 a chance to call pthread_join.)

#if defined(__BIONIC__)
  ASSERT_EQ(EINVAL, pthread_detach(t1));
#else
  ASSERT_EQ(0, pthread_detach(t1));
#endif
  AssertDetached(t1, false);

  spin_helper.UnSpin();

  // ...but t2's join on t1 still goes ahead (which we can tell because our join on t2 finishes).
  void* join_result;
  ASSERT_EQ(0, pthread_join(t2, &join_result));
  ASSERT_EQ(0U, reinterpret_cast<uintptr_t>(join_result));
}

TEST(pthread, pthread_join_self) {
  ASSERT_EQ(EDEADLK, pthread_join(pthread_self(), nullptr));
}

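// Regression state for issue 37410 (see the death test below): a helper
// thread joins the main thread while the main thread exits via pthread_exit.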
struct TestBug37410 {
  pthread_t main_thread;
  pthread_mutex_t mutex;

  static void main() {
    TestBug37410 data;
    data.main_thread = pthread_self();
    ASSERT_EQ(0, pthread_mutex_init(&data.mutex, nullptr));
    ASSERT_EQ(0, pthread_mutex_lock(&data.mutex));

    pthread_t t;
    ASSERT_EQ(0, pthread_create(&t, nullptr, TestBug37410::thread_fn, reinterpret_cast<void*>(&data)));

    // Wait for the thread to be running...
    ASSERT_EQ(0, pthread_mutex_lock(&data.mutex));
    ASSERT_EQ(0, pthread_mutex_unlock(&data.mutex));

    // ...and exit.
    pthread_exit(nullptr);
  }

 private:
  static void* thread_fn(void* arg) {
    TestBug37410* data = reinterpret_cast<TestBug37410*>(arg);

    // Unlocking data->mutex will cause the main thread to exit, invalidating *data. Save the handle.
    pthread_t main_thread = data->main_thread;

    // Let the main thread know we're running.
    pthread_mutex_unlock(&data->mutex);

    // And wait for the main thread to exit.
    pthread_join(main_thread, nullptr);

    return nullptr;
  }
};

// Even though this isn't really a death test, we have to say "DeathTest" here so gtest knows to
// run this test (which exits normally) in its own process.
TEST_F(pthread_DeathTest, pthread_bug_37410) {
  // http://code.google.com/p/android/issues/detail?id=37410
  ASSERT_EXIT(TestBug37410::main(), ::testing::ExitedWithCode(0), "");
}

static void* SignalHandlerFn(void* arg) {
  sigset64_t wait_set;
  sigfillset64(&wait_set);
  return reinterpret_cast<void*>(sigwait64(&wait_set, reinterpret_cast<int*>(arg)));
}

TEST(pthread, pthread_sigmask) {
  // Check that SIGUSR1 isn't blocked.
  sigset_t original_set;
  sigemptyset(&original_set);
  ASSERT_EQ(0, pthread_sigmask(SIG_BLOCK, nullptr, &original_set));
  ASSERT_FALSE(sigismember(&original_set, SIGUSR1));

  // Block SIGUSR1.
  sigset_t set;
  sigemptyset(&set);
  sigaddset(&set, SIGUSR1);
  ASSERT_EQ(0, pthread_sigmask(SIG_BLOCK, &set, nullptr));

  // Check that SIGUSR1 is blocked.
  sigset_t final_set;
  sigemptyset(&final_set);
  ASSERT_EQ(0, pthread_sigmask(SIG_BLOCK, nullptr, &final_set));
  ASSERT_TRUE(sigismember(&final_set, SIGUSR1));
  // ...and that sigprocmask agrees with pthread_sigmask.
  sigemptyset(&final_set);
  ASSERT_EQ(0, sigprocmask(SIG_BLOCK, nullptr, &final_set));
  ASSERT_TRUE(sigismember(&final_set, SIGUSR1));

  // Spawn a thread that calls sigwait and tells us what it received.
  pthread_t signal_thread;
  int received_signal = -1;
  ASSERT_EQ(0, pthread_create(&signal_thread, nullptr, SignalHandlerFn, &received_signal));

  // Send that thread SIGUSR1.
  pthread_kill(signal_thread, SIGUSR1);

  // See what it got.
  void* join_result;
  ASSERT_EQ(0, pthread_join(signal_thread, &join_result));
  ASSERT_EQ(SIGUSR1, received_signal);
  ASSERT_EQ(0U, reinterpret_cast<uintptr_t>(join_result));

  // Restore the original signal mask.
  ASSERT_EQ(0, pthread_sigmask(SIG_SETMASK, &original_set, nullptr));
}

TEST(pthread, pthread_sigmask64_SIGRTMIN) {
  // Check that SIGRTMIN isn't blocked.
  sigset64_t original_set;
  sigemptyset64(&original_set);
  ASSERT_EQ(0, pthread_sigmask64(SIG_BLOCK, nullptr, &original_set));
  ASSERT_FALSE(sigismember64(&original_set, SIGRTMIN));

  // Block SIGRTMIN.
  sigset64_t set;
  sigemptyset64(&set);
  sigaddset64(&set, SIGRTMIN);
  ASSERT_EQ(0, pthread_sigmask64(SIG_BLOCK, &set, nullptr));

  // Check that SIGRTMIN is blocked.
  sigset64_t final_set;
  sigemptyset64(&final_set);
  ASSERT_EQ(0, pthread_sigmask64(SIG_BLOCK, nullptr, &final_set));
  ASSERT_TRUE(sigismember64(&final_set, SIGRTMIN));
  // ...and that sigprocmask64 agrees with pthread_sigmask64.
  sigemptyset64(&final_set);
  ASSERT_EQ(0, sigprocmask64(SIG_BLOCK, nullptr, &final_set));
  ASSERT_TRUE(sigismember64(&final_set, SIGRTMIN));

  // Spawn a thread that calls sigwait64 and tells us what it received.
  pthread_t signal_thread;
  int received_signal = -1;
  ASSERT_EQ(0, pthread_create(&signal_thread, nullptr, SignalHandlerFn, &received_signal));

  // Send that thread SIGRTMIN.
  pthread_kill(signal_thread, SIGRTMIN);

  // See what it got.
  void* join_result;
  ASSERT_EQ(0, pthread_join(signal_thread, &join_result));
  ASSERT_EQ(SIGRTMIN, received_signal);
  ASSERT_EQ(0U, reinterpret_cast<uintptr_t>(join_result));

  // Restore the original signal mask.
  ASSERT_EQ(0, pthread_sigmask64(SIG_SETMASK, &original_set, nullptr));
}

static void test_pthread_setname_np__pthread_getname_np(pthread_t t) {
  ASSERT_EQ(0, pthread_setname_np(t, "short"));
  char name[32];
  ASSERT_EQ(0, pthread_getname_np(t, name, sizeof(name)));
  ASSERT_STREQ("short", name);

  // The limit is 15 characters --- the kernel's buffer is 16, but includes a NUL.
  ASSERT_EQ(0, pthread_setname_np(t, "123456789012345"));
  ASSERT_EQ(0, pthread_getname_np(t, name, sizeof(name)));
  ASSERT_STREQ("123456789012345", name);

  ASSERT_EQ(ERANGE, pthread_setname_np(t, "1234567890123456"));

  // The passed-in buffer should be at least 16 bytes.
  ASSERT_EQ(0, pthread_getname_np(t, name, 16));
  ASSERT_EQ(ERANGE, pthread_getname_np(t, name, 15));
}

TEST(pthread, pthread_setname_np__pthread_getname_np__self) {
  test_pthread_setname_np__pthread_getname_np(pthread_self());
}

TEST(pthread, pthread_setname_np__pthread_getname_np__other) {
  SpinFunctionHelper spin_helper;

  pthread_t t;
  ASSERT_EQ(0, pthread_create(&t, nullptr, spin_helper.GetFunction(), nullptr));
  test_pthread_setname_np__pthread_getname_np(t);
  spin_helper.UnSpin();
  ASSERT_EQ(0, pthread_join(t, nullptr));
}

// http://b/28051133: a kernel misfeature means that you can't change the
// name of another thread if you've set PR_SET_DUMPABLE to 0.
TEST(pthread, pthread_setname_np__pthread_getname_np__other_PR_SET_DUMPABLE) {
  ASSERT_EQ(0, prctl(PR_SET_DUMPABLE, 0)) << strerror(errno);

  SpinFunctionHelper spin_helper;

  pthread_t t;
  ASSERT_EQ(0, pthread_create(&t, nullptr, spin_helper.GetFunction(), nullptr));
  test_pthread_setname_np__pthread_getname_np(t);
  spin_helper.UnSpin();
  ASSERT_EQ(0, pthread_join(t, nullptr));
}

TEST_F(pthread_DeathTest, pthread_setname_np__no_such_thread) {
  pthread_t dead_thread;
  MakeDeadThread(dead_thread);

  EXPECT_DEATH(pthread_setname_np(dead_thread, "short 3"),
               "invalid pthread_t (.*) passed to pthread_setname_np");
}

TEST_F(pthread_DeathTest, pthread_setname_np__null_thread) {
  pthread_t null_thread = 0;
  EXPECT_EQ(ENOENT, pthread_setname_np(null_thread, "short 3"));
}

TEST_F(pthread_DeathTest, pthread_getname_np__no_such_thread) {
  pthread_t dead_thread;
  MakeDeadThread(dead_thread);

  char name[64];
  EXPECT_DEATH(pthread_getname_np(dead_thread, name, sizeof(name)),
               "invalid pthread_t (.*) passed to pthread_getname_np");
}

TEST_F(pthread_DeathTest, pthread_getname_np__null_thread) {
  pthread_t null_thread = 0;

  char name[64];
  EXPECT_EQ(ENOENT, pthread_getname_np(null_thread, name, sizeof(name)));
}

TEST(pthread, pthread_kill__0) {
  // Signal 0 just tests that the thread exists, so it's safe to call on ourselves.
  ASSERT_EQ(0, pthread_kill(pthread_self(), 0));
}

TEST(pthread, pthread_kill__invalid_signal) {
  ASSERT_EQ(EINVAL, pthread_kill(pthread_self(), -1));
}

static void pthread_kill__in_signal_handler_helper(int signal_number) {
  static int count = 0;
  ASSERT_EQ(SIGALRM, signal_number);
  if (++count == 1) {
    // Can we call pthread_kill from a signal handler?
    ASSERT_EQ(0, pthread_kill(pthread_self(), SIGALRM));
  }
}

TEST(pthread, pthread_kill__in_signal_handler) {
  ScopedSignalHandler ssh(SIGALRM, pthread_kill__in_signal_handler_helper);
  ASSERT_EQ(0, pthread_kill(pthread_self(), SIGALRM));
}

TEST(pthread, pthread_kill__exited_thread) {
  static std::promise<pid_t> tid_promise;
  pthread_t thread;
  ASSERT_EQ(0, pthread_create(&thread, nullptr,
                              [](void*) -> void* {
                                tid_promise.set_value(gettid());
                                return nullptr;
                              },
                              nullptr));

  pid_t tid = tid_promise.get_future().get();
  while (TEMP_FAILURE_RETRY(syscall(__NR_tgkill, getpid(), tid, 0)) != -1) {
    continue;
  }
  ASSERT_ERRNO(ESRCH);

  ASSERT_EQ(ESRCH, pthread_kill(thread, 0));
}

TEST_F(pthread_DeathTest, pthread_detach__no_such_thread) {
  pthread_t dead_thread;
  MakeDeadThread(dead_thread);

  EXPECT_DEATH(pthread_detach(dead_thread),
               "invalid pthread_t (.*) passed to pthread_detach");
}

TEST_F(pthread_DeathTest, pthread_detach__null_thread) {
  pthread_t null_thread = 0;
  EXPECT_EQ(ESRCH, pthread_detach(null_thread));
}

TEST(pthread, pthread_getcpuclockid__clock_gettime) {
  SpinFunctionHelper spin_helper;

  pthread_t t;
  ASSERT_EQ(0, pthread_create(&t, nullptr, spin_helper.GetFunction(), nullptr));

  clockid_t c;
  ASSERT_EQ(0, pthread_getcpuclockid(t, &c));
  timespec ts;
  ASSERT_EQ(0, clock_gettime(c, &ts));
  spin_helper.UnSpin();
  ASSERT_EQ(0, pthread_join(t, nullptr));
}

TEST_F(pthread_DeathTest, pthread_getcpuclockid__no_such_thread) {
  pthread_t dead_thread;
  MakeDeadThread(dead_thread);

  clockid_t c;
  EXPECT_DEATH(pthread_getcpuclockid(dead_thread, &c),
               "invalid pthread_t (.*) passed to pthread_getcpuclockid");
}

TEST_F(pthread_DeathTest, pthread_getcpuclockid__null_thread) {
  pthread_t null_thread = 0;
  clockid_t c;
  EXPECT_EQ(ESRCH, pthread_getcpuclockid(null_thread, &c));
}

TEST_F(pthread_DeathTest, pthread_getschedparam__no_such_thread) {
  pthread_t dead_thread;
  MakeDeadThread(dead_thread);

  int policy;
  sched_param param;
  EXPECT_DEATH(pthread_getschedparam(dead_thread, &policy, &param),
               "invalid pthread_t (.*) passed to pthread_getschedparam");
}

TEST_F(pthread_DeathTest, pthread_getschedparam__null_thread) {
  pthread_t null_thread = 0;
  int policy;
  sched_param param;
  EXPECT_EQ(ESRCH, pthread_getschedparam(null_thread, &policy, &param));
}

TEST_F(pthread_DeathTest, pthread_setschedparam__no_such_thread) {
  pthread_t dead_thread;
  MakeDeadThread(dead_thread);

  int policy = 0;
  sched_param param;
  EXPECT_DEATH(pthread_setschedparam(dead_thread, policy, &param),
               "invalid pthread_t (.*) passed to pthread_setschedparam");
}

TEST_F(pthread_DeathTest, pthread_setschedparam__null_thread) {
  pthread_t null_thread = 0;
  int policy = 0;
  sched_param param;
  EXPECT_EQ(ESRCH, pthread_setschedparam(null_thread, policy, &param));
}

TEST_F(pthread_DeathTest, pthread_setschedprio__no_such_thread) {
  pthread_t dead_thread;
  MakeDeadThread(dead_thread);

  EXPECT_DEATH(pthread_setschedprio(dead_thread, 123),
               "invalid pthread_t (.*) passed to pthread_setschedprio");
}

TEST_F(pthread_DeathTest, pthread_setschedprio__null_thread) {
  pthread_t null_thread = 0;
  EXPECT_EQ(ESRCH, pthread_setschedprio(null_thread, 123));
}

TEST_F(pthread_DeathTest, pthread_join__no_such_thread) {
  pthread_t dead_thread;
  MakeDeadThread(dead_thread);

  EXPECT_DEATH(pthread_join(dead_thread, nullptr),
               "invalid pthread_t (.*) passed to pthread_join");
}

TEST_F(pthread_DeathTest, pthread_join__null_thread) {
  pthread_t null_thread = 0;
  EXPECT_EQ(ESRCH, pthread_join(null_thread, nullptr));
}

TEST_F(pthread_DeathTest, pthread_kill__no_such_thread) {
  pthread_t dead_thread;
  MakeDeadThread(dead_thread);

  EXPECT_DEATH(pthread_kill(dead_thread, 0),
               "invalid pthread_t (.*) passed to pthread_kill");
}

TEST_F(pthread_DeathTest, pthread_kill__null_thread) {
  pthread_t null_thread = 0;
  EXPECT_EQ(ESRCH, pthread_kill(null_thread, 0));
}

TEST(pthread, pthread_join__multijoin) {
  SpinFunctionHelper spin_helper;

  pthread_t t1;
  ASSERT_EQ(0, pthread_create(&t1, nullptr, spin_helper.GetFunction(), nullptr));

  pthread_t t2;
  ASSERT_EQ(0, pthread_create(&t2, nullptr, JoinFn, reinterpret_cast<void*>(t1)));

  sleep(1); // (Give t2 a chance to call pthread_join.)

  // Multiple joins to the same thread should fail.
  ASSERT_EQ(EINVAL, pthread_join(t1, nullptr));

  spin_helper.UnSpin();

  // ...but t2's join on t1 still goes ahead (which we can tell because our join on t2 finishes).
  void* join_result;
  ASSERT_EQ(0, pthread_join(t2, &join_result));
  ASSERT_EQ(0U, reinterpret_cast<uintptr_t>(join_result));
}

TEST(pthread, pthread_join__race) {
  // http://b/11693195 --- pthread_join could return before the thread had actually exited.
  // If the joiner unmapped the thread's stack, that could lead to SIGSEGV in the thread.
  for (size_t i = 0; i < 1024; ++i) {
    size_t stack_size = 640*1024;
    void* stack = mmap(nullptr, stack_size, PROT_READ|PROT_WRITE, MAP_ANON|MAP_PRIVATE, -1, 0);

    pthread_attr_t a;
    pthread_attr_init(&a);
    pthread_attr_setstack(&a, stack, stack_size);

    pthread_t t;
    ASSERT_EQ(0, pthread_create(&t, &a, IdFn, nullptr));
    ASSERT_EQ(0, pthread_join(t, nullptr));
    ASSERT_EQ(0, munmap(stack, stack_size));
  }
}

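// The stack and guard sizes recorded in a pthread_attr_t are only requests;
// pthread_create may round them up. The helpers below spawn a thread with the
// given attributes and read back, via pthread_getattr_np, the sizes the
// thread actually received.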
static void* GetActualGuardSizeFn(void* arg) {
  pthread_attr_t attributes;
  pthread_getattr_np(pthread_self(), &attributes);
  pthread_attr_getguardsize(&attributes, reinterpret_cast<size_t*>(arg));
  return nullptr;
}

static size_t GetActualGuardSize(const pthread_attr_t& attributes) {
  size_t result;
  pthread_t t;
  pthread_create(&t, &attributes, GetActualGuardSizeFn, &result);
  pthread_join(t, nullptr);
  return result;
}

static void* GetActualStackSizeFn(void* arg) {
  pthread_attr_t attributes;
  pthread_getattr_np(pthread_self(), &attributes);
  pthread_attr_getstacksize(&attributes, reinterpret_cast<size_t*>(arg));
  return nullptr;
}

static size_t GetActualStackSize(const pthread_attr_t& attributes) {
  size_t result;
  pthread_t t;
  pthread_create(&t, &attributes, GetActualStackSizeFn, &result);
  pthread_join(t, nullptr);
  return result;
}

TEST(pthread, pthread_attr_setguardsize_tiny) {
  pthread_attr_t attributes;
  ASSERT_EQ(0, pthread_attr_init(&attributes));

  // No such thing as too small: will be rounded up to one page by pthread_create.
  ASSERT_EQ(0, pthread_attr_setguardsize(&attributes, 128));
  size_t guard_size;
  ASSERT_EQ(0, pthread_attr_getguardsize(&attributes, &guard_size));
  ASSERT_EQ(128U, guard_size);
  ASSERT_EQ(static_cast<unsigned long>(getpagesize()), GetActualGuardSize(attributes));
}

TEST(pthread, pthread_attr_setguardsize_reasonable) {
  pthread_attr_t attributes;
  ASSERT_EQ(0, pthread_attr_init(&attributes));

  // Large enough and a multiple of the page size.
  ASSERT_EQ(0, pthread_attr_setguardsize(&attributes, 32*1024));
  size_t guard_size;
  ASSERT_EQ(0, pthread_attr_getguardsize(&attributes, &guard_size));
  ASSERT_EQ(32*1024U, guard_size);
  ASSERT_EQ(32*1024U, GetActualGuardSize(attributes));
}

TEST(pthread, pthread_attr_setguardsize_needs_rounding) {
  pthread_attr_t attributes;
  ASSERT_EQ(0, pthread_attr_init(&attributes));

  // Large enough but not a multiple of the page size.
  ASSERT_EQ(0, pthread_attr_setguardsize(&attributes, 32*1024 + 1));
  size_t guard_size;
  ASSERT_EQ(0, pthread_attr_getguardsize(&attributes, &guard_size));
  ASSERT_EQ(32*1024U + 1, guard_size);
  ASSERT_EQ(roundup(32 * 1024U + 1, getpagesize()), GetActualGuardSize(attributes));
}

TEST(pthread, pthread_attr_setguardsize_enormous) {
  pthread_attr_t attributes;
  ASSERT_EQ(0, pthread_attr_init(&attributes));

  // Larger than the stack itself. (Historically we mistakenly carved
  // the guard out of the stack itself, rather than adding it after the
  // end.)
  ASSERT_EQ(0, pthread_attr_setguardsize(&attributes, 32*1024*1024));
  size_t guard_size;
  ASSERT_EQ(0, pthread_attr_getguardsize(&attributes, &guard_size));
  ASSERT_EQ(32*1024*1024U, guard_size);
  ASSERT_EQ(32*1024*1024U, GetActualGuardSize(attributes));
}

TEST(pthread, pthread_attr_setstacksize) {
  pthread_attr_t attributes;
  ASSERT_EQ(0, pthread_attr_init(&attributes));

  // Get the default stack size.
  size_t default_stack_size;
  ASSERT_EQ(0, pthread_attr_getstacksize(&attributes, &default_stack_size));

  // Too small.
  ASSERT_EQ(EINVAL, pthread_attr_setstacksize(&attributes, 128));
  size_t stack_size;
  ASSERT_EQ(0, pthread_attr_getstacksize(&attributes, &stack_size));
  ASSERT_EQ(default_stack_size, stack_size);
  ASSERT_GE(GetActualStackSize(attributes), default_stack_size);

  // Large enough and a multiple of the page size; may be rounded up by pthread_create.
  ASSERT_EQ(0, pthread_attr_setstacksize(&attributes, 32*1024));
  ASSERT_EQ(0, pthread_attr_getstacksize(&attributes, &stack_size));
  ASSERT_EQ(32*1024U, stack_size);
  ASSERT_GE(GetActualStackSize(attributes), 32*1024U);

  // Large enough but not aligned; will be rounded up by pthread_create.
  ASSERT_EQ(0, pthread_attr_setstacksize(&attributes, 32*1024 + 1));
  ASSERT_EQ(0, pthread_attr_getstacksize(&attributes, &stack_size));
  ASSERT_EQ(32*1024U + 1, stack_size);
#if defined(__BIONIC__)
  ASSERT_GT(GetActualStackSize(attributes), 32*1024U + 1);
#else // __BIONIC__
  // glibc rounds down, in violation of POSIX. They document this in their BUGS section.
  ASSERT_EQ(GetActualStackSize(attributes), 32*1024U);
#endif // __BIONIC__
}

TEST(pthread, pthread_rwlockattr_smoke) {
  pthread_rwlockattr_t attr;
  ASSERT_EQ(0, pthread_rwlockattr_init(&attr));

  int pshared_value_array[] = {PTHREAD_PROCESS_PRIVATE, PTHREAD_PROCESS_SHARED};
  for (size_t i = 0; i < sizeof(pshared_value_array) / sizeof(pshared_value_array[0]); ++i) {
    ASSERT_EQ(0, pthread_rwlockattr_setpshared(&attr, pshared_value_array[i]));
    int pshared;
    ASSERT_EQ(0, pthread_rwlockattr_getpshared(&attr, &pshared));
    ASSERT_EQ(pshared_value_array[i], pshared);
  }

#if !defined(ANDROID_HOST_MUSL)
  // musl doesn't have pthread_rwlockattr_setkind_np.
  int kind_array[] = {PTHREAD_RWLOCK_PREFER_READER_NP,
                      PTHREAD_RWLOCK_PREFER_WRITER_NONRECURSIVE_NP};
  for (size_t i = 0; i < sizeof(kind_array) / sizeof(kind_array[0]); ++i) {
    ASSERT_EQ(0, pthread_rwlockattr_setkind_np(&attr, kind_array[i]));
    int kind;
    ASSERT_EQ(0, pthread_rwlockattr_getkind_np(&attr, &kind));
    ASSERT_EQ(kind_array[i], kind);
  }
#endif

  ASSERT_EQ(0, pthread_rwlockattr_destroy(&attr));
}
924TEST(pthread, pthread_rwlock_init_same_as_PTHREAD_RWLOCK_INITIALIZER) {
925 pthread_rwlock_t lock1 = PTHREAD_RWLOCK_INITIALIZER;
926 pthread_rwlock_t lock2;
Yi Kong32bc0fc2018-08-02 17:31:13 -0700927 ASSERT_EQ(0, pthread_rwlock_init(&lock2, nullptr));
Yabin Cui76615da2015-03-17 14:22:09 -0700928 ASSERT_EQ(0, memcmp(&lock1, &lock2, sizeof(lock1)));
929}
930
Elliott Hughesc3f11402013-10-30 14:40:09 -0700931TEST(pthread, pthread_rwlock_smoke) {
932 pthread_rwlock_t l;
Yi Kong32bc0fc2018-08-02 17:31:13 -0700933 ASSERT_EQ(0, pthread_rwlock_init(&l, nullptr));
Elliott Hughesc3f11402013-10-30 14:40:09 -0700934
Calin Juravle76f352e2014-05-19 13:41:10 +0100935 // Single read lock
Elliott Hughesc3f11402013-10-30 14:40:09 -0700936 ASSERT_EQ(0, pthread_rwlock_rdlock(&l));
937 ASSERT_EQ(0, pthread_rwlock_unlock(&l));
938
Calin Juravle76f352e2014-05-19 13:41:10 +0100939 // Multiple read lock
940 ASSERT_EQ(0, pthread_rwlock_rdlock(&l));
941 ASSERT_EQ(0, pthread_rwlock_rdlock(&l));
942 ASSERT_EQ(0, pthread_rwlock_unlock(&l));
943 ASSERT_EQ(0, pthread_rwlock_unlock(&l));
944
945 // Write lock
Calin Juravle92687e42014-05-22 19:21:22 +0100946 ASSERT_EQ(0, pthread_rwlock_wrlock(&l));
947 ASSERT_EQ(0, pthread_rwlock_unlock(&l));
Calin Juravle76f352e2014-05-19 13:41:10 +0100948
949 // Try writer lock
950 ASSERT_EQ(0, pthread_rwlock_trywrlock(&l));
951 ASSERT_EQ(EBUSY, pthread_rwlock_trywrlock(&l));
952 ASSERT_EQ(EBUSY, pthread_rwlock_tryrdlock(&l));
953 ASSERT_EQ(0, pthread_rwlock_unlock(&l));
954
955 // Try reader lock
956 ASSERT_EQ(0, pthread_rwlock_tryrdlock(&l));
957 ASSERT_EQ(0, pthread_rwlock_tryrdlock(&l));
958 ASSERT_EQ(EBUSY, pthread_rwlock_trywrlock(&l));
959 ASSERT_EQ(0, pthread_rwlock_unlock(&l));
960 ASSERT_EQ(0, pthread_rwlock_unlock(&l));
961
962 // Try writer lock after unlock
Elliott Hughesc3f11402013-10-30 14:40:09 -0700963 ASSERT_EQ(0, pthread_rwlock_wrlock(&l));
964 ASSERT_EQ(0, pthread_rwlock_unlock(&l));
965
Calin Juravle76f352e2014-05-19 13:41:10 +0100966 // EDEADLK in "read after write"
967 ASSERT_EQ(0, pthread_rwlock_wrlock(&l));
968 ASSERT_EQ(EDEADLK, pthread_rwlock_rdlock(&l));
969 ASSERT_EQ(0, pthread_rwlock_unlock(&l));
970
971 // EDEADLK in "write after write"
972 ASSERT_EQ(0, pthread_rwlock_wrlock(&l));
973 ASSERT_EQ(EDEADLK, pthread_rwlock_wrlock(&l));
974 ASSERT_EQ(0, pthread_rwlock_unlock(&l));
Calin Juravle76f352e2014-05-19 13:41:10 +0100975
Elliott Hughesc3f11402013-10-30 14:40:09 -0700976 ASSERT_EQ(0, pthread_rwlock_destroy(&l));
977}
978
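// Shared state for the rwlock wakeup tests: the main thread holds the lock
// while a helper thread blocks on it. `progress` tracks the handshake, and
// `tid` lets the main thread wait (via WaitUntilThreadSleep) until the
// helper is actually blocked in the kernel before releasing the lock.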
struct RwlockWakeupHelperArg {
  pthread_rwlock_t lock;
  enum Progress {
    LOCK_INITIALIZED,
    LOCK_WAITING,
    LOCK_RELEASED,
    LOCK_ACCESSED,
    LOCK_TIMEDOUT,
  };
  std::atomic<Progress> progress;
  std::atomic<pid_t> tid;
  std::function<int (pthread_rwlock_t*)> trylock_function;
  std::function<int (pthread_rwlock_t*)> lock_function;
  std::function<int (pthread_rwlock_t*, const timespec*)> timed_lock_function;
  clockid_t clock;
};

static void pthread_rwlock_wakeup_helper(RwlockWakeupHelperArg* arg) {
  arg->tid = gettid();
  ASSERT_EQ(RwlockWakeupHelperArg::LOCK_INITIALIZED, arg->progress);
  arg->progress = RwlockWakeupHelperArg::LOCK_WAITING;

  ASSERT_EQ(EBUSY, arg->trylock_function(&arg->lock));
  ASSERT_EQ(0, arg->lock_function(&arg->lock));
  ASSERT_EQ(RwlockWakeupHelperArg::LOCK_RELEASED, arg->progress);
  ASSERT_EQ(0, pthread_rwlock_unlock(&arg->lock));

  arg->progress = RwlockWakeupHelperArg::LOCK_ACCESSED;
}

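// Takes a read lock, starts a helper that blocks trying to write, then
// releases the lock and checks that the writer was woken and got the lock.
// (test_pthread_rwlock_writer_wakeup_reader below is the mirror image.)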
Yabin Cuic9a659c2015-11-05 15:36:08 -08001009static void test_pthread_rwlock_reader_wakeup_writer(std::function<int (pthread_rwlock_t*)> lock_function) {
Yabin Cui08ee8d22015-02-11 17:04:36 -08001010 RwlockWakeupHelperArg wakeup_arg;
Yi Kong32bc0fc2018-08-02 17:31:13 -07001011 ASSERT_EQ(0, pthread_rwlock_init(&wakeup_arg.lock, nullptr));
Yabin Cui08ee8d22015-02-11 17:04:36 -08001012 ASSERT_EQ(0, pthread_rwlock_rdlock(&wakeup_arg.lock));
1013 wakeup_arg.progress = RwlockWakeupHelperArg::LOCK_INITIALIZED;
Yabin Cuif7969852015-04-02 17:47:48 -07001014 wakeup_arg.tid = 0;
Tom Cherry60ddedf2018-02-20 15:40:02 -08001015 wakeup_arg.trylock_function = &pthread_rwlock_trywrlock;
Yabin Cuic9a659c2015-11-05 15:36:08 -08001016 wakeup_arg.lock_function = lock_function;
Yabin Cui08ee8d22015-02-11 17:04:36 -08001017
1018 pthread_t thread;
Yi Kong32bc0fc2018-08-02 17:31:13 -07001019 ASSERT_EQ(0, pthread_create(&thread, nullptr,
Yabin Cuic9a659c2015-11-05 15:36:08 -08001020 reinterpret_cast<void* (*)(void*)>(pthread_rwlock_wakeup_helper), &wakeup_arg));
Yabin Cuif7969852015-04-02 17:47:48 -07001021 WaitUntilThreadSleep(wakeup_arg.tid);
1022 ASSERT_EQ(RwlockWakeupHelperArg::LOCK_WAITING, wakeup_arg.progress);
1023
Yabin Cui08ee8d22015-02-11 17:04:36 -08001024 wakeup_arg.progress = RwlockWakeupHelperArg::LOCK_RELEASED;
1025 ASSERT_EQ(0, pthread_rwlock_unlock(&wakeup_arg.lock));
1026
Yi Kong32bc0fc2018-08-02 17:31:13 -07001027 ASSERT_EQ(0, pthread_join(thread, nullptr));
Yabin Cui08ee8d22015-02-11 17:04:36 -08001028 ASSERT_EQ(RwlockWakeupHelperArg::LOCK_ACCESSED, wakeup_arg.progress);
1029 ASSERT_EQ(0, pthread_rwlock_destroy(&wakeup_arg.lock));
1030}
1031
Yabin Cuic9a659c2015-11-05 15:36:08 -08001032TEST(pthread, pthread_rwlock_reader_wakeup_writer) {
1033 test_pthread_rwlock_reader_wakeup_writer(pthread_rwlock_wrlock);
Yabin Cui08ee8d22015-02-11 17:04:36 -08001034}
1035
Yabin Cuic9a659c2015-11-05 15:36:08 -08001036TEST(pthread, pthread_rwlock_reader_wakeup_writer_timedwait) {
1037 timespec ts;
1038 ASSERT_EQ(0, clock_gettime(CLOCK_REALTIME, &ts));
1039 ts.tv_sec += 1;
1040 test_pthread_rwlock_reader_wakeup_writer([&](pthread_rwlock_t* lock) {
1041 return pthread_rwlock_timedwrlock(lock, &ts);
1042 });
1043}
1044
Tom Cherryc6b5bcd2018-03-05 14:14:44 -08001045TEST(pthread, pthread_rwlock_reader_wakeup_writer_timedwait_monotonic_np) {
1046#if defined(__BIONIC__)
1047 timespec ts;
1048 ASSERT_EQ(0, clock_gettime(CLOCK_MONOTONIC, &ts));
1049 ts.tv_sec += 1;
1050 test_pthread_rwlock_reader_wakeup_writer(
1051 [&](pthread_rwlock_t* lock) { return pthread_rwlock_timedwrlock_monotonic_np(lock, &ts); });
1052#else // __BIONIC__
Elliott Hughesbcaa4542019-03-08 15:20:23 -08001053 GTEST_SKIP() << "pthread_rwlock_timedwrlock_monotonic_np not available";
Tom Cherryc6b5bcd2018-03-05 14:14:44 -08001054#endif // __BIONIC__
1055}
1056
Tom Cherry69010802019-05-07 20:33:05 -07001057TEST(pthread, pthread_rwlock_reader_wakeup_writer_clockwait) {
1058#if defined(__BIONIC__)
1059 timespec ts;
1060 ASSERT_EQ(0, clock_gettime(CLOCK_MONOTONIC, &ts));
1061 ts.tv_sec += 1;
1062 test_pthread_rwlock_reader_wakeup_writer([&](pthread_rwlock_t* lock) {
1063 return pthread_rwlock_clockwrlock(lock, CLOCK_MONOTONIC, &ts);
1064 });
1065
1066 ASSERT_EQ(0, clock_gettime(CLOCK_REALTIME, &ts));
1067 ts.tv_sec += 1;
1068 test_pthread_rwlock_reader_wakeup_writer([&](pthread_rwlock_t* lock) {
1069 return pthread_rwlock_clockwrlock(lock, CLOCK_REALTIME, &ts);
1070 });
1071#else // __BIONIC__
1072 GTEST_SKIP() << "pthread_rwlock_clockwrlock not available";
1073#endif // __BIONIC__
1074}
1075
static void test_pthread_rwlock_writer_wakeup_reader(std::function<int (pthread_rwlock_t*)> lock_function) {
  RwlockWakeupHelperArg wakeup_arg;
  ASSERT_EQ(0, pthread_rwlock_init(&wakeup_arg.lock, nullptr));
  ASSERT_EQ(0, pthread_rwlock_wrlock(&wakeup_arg.lock));
  wakeup_arg.progress = RwlockWakeupHelperArg::LOCK_INITIALIZED;
  wakeup_arg.tid = 0;
  wakeup_arg.trylock_function = &pthread_rwlock_tryrdlock;
  wakeup_arg.lock_function = lock_function;

  pthread_t thread;
  ASSERT_EQ(0, pthread_create(&thread, nullptr,
      reinterpret_cast<void* (*)(void*)>(pthread_rwlock_wakeup_helper), &wakeup_arg));
  WaitUntilThreadSleep(wakeup_arg.tid);
  ASSERT_EQ(RwlockWakeupHelperArg::LOCK_WAITING, wakeup_arg.progress);

  wakeup_arg.progress = RwlockWakeupHelperArg::LOCK_RELEASED;
  ASSERT_EQ(0, pthread_rwlock_unlock(&wakeup_arg.lock));

  ASSERT_EQ(0, pthread_join(thread, nullptr));
  ASSERT_EQ(RwlockWakeupHelperArg::LOCK_ACCESSED, wakeup_arg.progress);
  ASSERT_EQ(0, pthread_rwlock_destroy(&wakeup_arg.lock));
}

TEST(pthread, pthread_rwlock_writer_wakeup_reader) {
  test_pthread_rwlock_writer_wakeup_reader(pthread_rwlock_rdlock);
}

TEST(pthread, pthread_rwlock_writer_wakeup_reader_timedwait) {
  timespec ts;
  ASSERT_EQ(0, clock_gettime(CLOCK_REALTIME, &ts));
  ts.tv_sec += 1;
  test_pthread_rwlock_writer_wakeup_reader([&](pthread_rwlock_t* lock) {
    return pthread_rwlock_timedrdlock(lock, &ts);
  });
}

TEST(pthread, pthread_rwlock_writer_wakeup_reader_timedwait_monotonic_np) {
#if defined(__BIONIC__)
  timespec ts;
  ASSERT_EQ(0, clock_gettime(CLOCK_MONOTONIC, &ts));
  ts.tv_sec += 1;
  test_pthread_rwlock_writer_wakeup_reader(
      [&](pthread_rwlock_t* lock) { return pthread_rwlock_timedrdlock_monotonic_np(lock, &ts); });
#else // __BIONIC__
  GTEST_SKIP() << "pthread_rwlock_timedrdlock_monotonic_np not available";
#endif // __BIONIC__
}

TEST(pthread, pthread_rwlock_writer_wakeup_reader_clockwait) {
#if defined(__BIONIC__)
  timespec ts;
  ASSERT_EQ(0, clock_gettime(CLOCK_MONOTONIC, &ts));
  ts.tv_sec += 1;
  test_pthread_rwlock_writer_wakeup_reader([&](pthread_rwlock_t* lock) {
    return pthread_rwlock_clockrdlock(lock, CLOCK_MONOTONIC, &ts);
  });

  ASSERT_EQ(0, clock_gettime(CLOCK_REALTIME, &ts));
  ts.tv_sec += 1;
  test_pthread_rwlock_writer_wakeup_reader([&](pthread_rwlock_t* lock) {
    return pthread_rwlock_clockrdlock(lock, CLOCK_REALTIME, &ts);
  });
#else // __BIONIC__
  GTEST_SKIP() << "pthread_rwlock_clockrdlock not available";
#endif // __BIONIC__
}

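// Runs on a second thread while the main thread holds the lock: the trylock
// must fail with EBUSY, timeouts in the past must return ETIMEDOUT, and
// out-of-range tv_nsec values must return EINVAL.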
static void pthread_rwlock_wakeup_timeout_helper(RwlockWakeupHelperArg* arg) {
  arg->tid = gettid();
  ASSERT_EQ(RwlockWakeupHelperArg::LOCK_INITIALIZED, arg->progress);
  arg->progress = RwlockWakeupHelperArg::LOCK_WAITING;

  ASSERT_EQ(EBUSY, arg->trylock_function(&arg->lock));

  timespec ts;
  ASSERT_EQ(0, clock_gettime(arg->clock, &ts));
  ASSERT_EQ(ETIMEDOUT, arg->timed_lock_function(&arg->lock, &ts));
  ts.tv_nsec = -1;
  ASSERT_EQ(EINVAL, arg->timed_lock_function(&arg->lock, &ts));
  ts.tv_nsec = NS_PER_S;
  ASSERT_EQ(EINVAL, arg->timed_lock_function(&arg->lock, &ts));
  ts.tv_nsec = NS_PER_S - 1;
  ts.tv_sec = -1;
  ASSERT_EQ(ETIMEDOUT, arg->timed_lock_function(&arg->lock, &ts));
  ASSERT_EQ(0, clock_gettime(arg->clock, &ts));
  ts.tv_sec += 1;
  ASSERT_EQ(ETIMEDOUT, arg->timed_lock_function(&arg->lock, &ts));
  ASSERT_EQ(RwlockWakeupHelperArg::LOCK_WAITING, arg->progress);
  arg->progress = RwlockWakeupHelperArg::LOCK_TIMEDOUT;
}

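// Write-locks the rwlock on the main thread, then verifies that a timed
// read-lock on a second thread (using the given clock) times out.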
static void pthread_rwlock_timedrdlock_timeout_helper(
    clockid_t clock, int (*lock_function)(pthread_rwlock_t* __rwlock, const timespec* __timeout)) {
  RwlockWakeupHelperArg wakeup_arg;
  ASSERT_EQ(0, pthread_rwlock_init(&wakeup_arg.lock, nullptr));
  ASSERT_EQ(0, pthread_rwlock_wrlock(&wakeup_arg.lock));
  wakeup_arg.progress = RwlockWakeupHelperArg::LOCK_INITIALIZED;
  wakeup_arg.tid = 0;
  wakeup_arg.trylock_function = &pthread_rwlock_tryrdlock;
  wakeup_arg.timed_lock_function = lock_function;
  wakeup_arg.clock = clock;

  pthread_t thread;
  ASSERT_EQ(0, pthread_create(&thread, nullptr,
      reinterpret_cast<void* (*)(void*)>(pthread_rwlock_wakeup_timeout_helper), &wakeup_arg));
  WaitUntilThreadSleep(wakeup_arg.tid);
  ASSERT_EQ(RwlockWakeupHelperArg::LOCK_WAITING, wakeup_arg.progress);

  ASSERT_EQ(0, pthread_join(thread, nullptr));
  ASSERT_EQ(RwlockWakeupHelperArg::LOCK_TIMEDOUT, wakeup_arg.progress);
  ASSERT_EQ(0, pthread_rwlock_unlock(&wakeup_arg.lock));
  ASSERT_EQ(0, pthread_rwlock_destroy(&wakeup_arg.lock));
}

TEST(pthread, pthread_rwlock_timedrdlock_timeout) {
  pthread_rwlock_timedrdlock_timeout_helper(CLOCK_REALTIME, pthread_rwlock_timedrdlock);
}

TEST(pthread, pthread_rwlock_timedrdlock_monotonic_np_timeout) {
#if defined(__BIONIC__)
  pthread_rwlock_timedrdlock_timeout_helper(CLOCK_MONOTONIC,
                                            pthread_rwlock_timedrdlock_monotonic_np);
#else // __BIONIC__
  GTEST_SKIP() << "pthread_rwlock_timedrdlock_monotonic_np not available";
#endif // __BIONIC__
}

TEST(pthread, pthread_rwlock_clockrdlock_monotonic_timeout) {
#if defined(__BIONIC__)
  pthread_rwlock_timedrdlock_timeout_helper(
      CLOCK_MONOTONIC, [](pthread_rwlock_t* __rwlock, const timespec* __timeout) {
        return pthread_rwlock_clockrdlock(__rwlock, CLOCK_MONOTONIC, __timeout);
      });
#else // __BIONIC__
  GTEST_SKIP() << "pthread_rwlock_clockrdlock not available";
#endif // __BIONIC__
}

TEST(pthread, pthread_rwlock_clockrdlock_realtime_timeout) {
#if defined(__BIONIC__)
  pthread_rwlock_timedrdlock_timeout_helper(
      CLOCK_REALTIME, [](pthread_rwlock_t* __rwlock, const timespec* __timeout) {
        return pthread_rwlock_clockrdlock(__rwlock, CLOCK_REALTIME, __timeout);
      });
#else // __BIONIC__
  GTEST_SKIP() << "pthread_rwlock_clockrdlock not available";
#endif // __BIONIC__
}

TEST(pthread, pthread_rwlock_clockrdlock_invalid) {
#if defined(__BIONIC__)
  pthread_rwlock_t lock = PTHREAD_RWLOCK_INITIALIZER;
  timespec ts;
  EXPECT_EQ(EINVAL, pthread_rwlock_clockrdlock(&lock, CLOCK_PROCESS_CPUTIME_ID, &ts));
#else // __BIONIC__
  GTEST_SKIP() << "pthread_rwlock_clockrdlock not available";
#endif // __BIONIC__
}

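// Same as the timedrdlock helper above, but the main thread holds a read lock
// and the second thread times out trying to write-lock.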
static void pthread_rwlock_timedwrlock_timeout_helper(
    clockid_t clock, int (*lock_function)(pthread_rwlock_t* __rwlock, const timespec* __timeout)) {
  RwlockWakeupHelperArg wakeup_arg;
  ASSERT_EQ(0, pthread_rwlock_init(&wakeup_arg.lock, nullptr));
  ASSERT_EQ(0, pthread_rwlock_rdlock(&wakeup_arg.lock));
  wakeup_arg.progress = RwlockWakeupHelperArg::LOCK_INITIALIZED;
  wakeup_arg.tid = 0;
  wakeup_arg.trylock_function = &pthread_rwlock_trywrlock;
  wakeup_arg.timed_lock_function = lock_function;
  wakeup_arg.clock = clock;

  pthread_t thread;
  ASSERT_EQ(0, pthread_create(&thread, nullptr,
      reinterpret_cast<void* (*)(void*)>(pthread_rwlock_wakeup_timeout_helper), &wakeup_arg));
  WaitUntilThreadSleep(wakeup_arg.tid);
  ASSERT_EQ(RwlockWakeupHelperArg::LOCK_WAITING, wakeup_arg.progress);

  ASSERT_EQ(0, pthread_join(thread, nullptr));
  ASSERT_EQ(RwlockWakeupHelperArg::LOCK_TIMEDOUT, wakeup_arg.progress);
  ASSERT_EQ(0, pthread_rwlock_unlock(&wakeup_arg.lock));
  ASSERT_EQ(0, pthread_rwlock_destroy(&wakeup_arg.lock));
}

TEST(pthread, pthread_rwlock_timedwrlock_timeout) {
  pthread_rwlock_timedwrlock_timeout_helper(CLOCK_REALTIME, pthread_rwlock_timedwrlock);
}

TEST(pthread, pthread_rwlock_timedwrlock_monotonic_np_timeout) {
#if defined(__BIONIC__)
  pthread_rwlock_timedwrlock_timeout_helper(CLOCK_MONOTONIC,
                                            pthread_rwlock_timedwrlock_monotonic_np);
#else // __BIONIC__
  GTEST_SKIP() << "pthread_rwlock_timedwrlock_monotonic_np not available";
#endif // __BIONIC__
}

TEST(pthread, pthread_rwlock_clockwrlock_monotonic_timeout) {
#if defined(__BIONIC__)
  pthread_rwlock_timedwrlock_timeout_helper(
      CLOCK_MONOTONIC, [](pthread_rwlock_t* __rwlock, const timespec* __timeout) {
        return pthread_rwlock_clockwrlock(__rwlock, CLOCK_MONOTONIC, __timeout);
      });
#else // __BIONIC__
  GTEST_SKIP() << "pthread_rwlock_clockwrlock not available";
#endif // __BIONIC__
}

TEST(pthread, pthread_rwlock_clockwrlock_realtime_timeout) {
#if defined(__BIONIC__)
  pthread_rwlock_timedwrlock_timeout_helper(
      CLOCK_REALTIME, [](pthread_rwlock_t* __rwlock, const timespec* __timeout) {
        return pthread_rwlock_clockwrlock(__rwlock, CLOCK_REALTIME, __timeout);
      });
#else // __BIONIC__
  GTEST_SKIP() << "pthread_rwlock_clockwrlock not available";
#endif // __BIONIC__
}

TEST(pthread, pthread_rwlock_clockwrlock_invalid) {
#if defined(__BIONIC__)
  pthread_rwlock_t lock = PTHREAD_RWLOCK_INITIALIZER;
  timespec ts;
  EXPECT_EQ(EINVAL, pthread_rwlock_clockwrlock(&lock, CLOCK_PROCESS_CPUTIME_ID, &ts));
#else // __BIONIC__
  GTEST_SKIP() << "pthread_rwlock_clockwrlock not available";
1300#endif // __BIONIC__
1301}
1302
Colin Cross4c5595c2021-08-16 15:51:59 -07001303#if !defined(ANDROID_HOST_MUSL)
Colin Cross7da20342021-07-28 11:18:11 -07001304// musl doesn't have pthread_rwlockattr_setkind_np
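// Creates a rwlock with the given PTHREAD_RWLOCK_PREFER_* kind and spawns
// reader/writer threads that take and release it, publishing their tids so the
// test can wait for them to block.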
class RwlockKindTestHelper {
 private:
  struct ThreadArg {
    RwlockKindTestHelper* helper;
    std::atomic<pid_t>& tid;

    ThreadArg(RwlockKindTestHelper* helper, std::atomic<pid_t>& tid)
      : helper(helper), tid(tid) { }
  };

 public:
  pthread_rwlock_t lock;

 public:
  explicit RwlockKindTestHelper(int kind_type) {
    InitRwlock(kind_type);
  }

  ~RwlockKindTestHelper() {
    DestroyRwlock();
  }

  void CreateWriterThread(pthread_t& thread, std::atomic<pid_t>& tid) {
    tid = 0;
    ThreadArg* arg = new ThreadArg(this, tid);
    ASSERT_EQ(0, pthread_create(&thread, nullptr,
                                reinterpret_cast<void* (*)(void*)>(WriterThreadFn), arg));
  }

  void CreateReaderThread(pthread_t& thread, std::atomic<pid_t>& tid) {
    tid = 0;
    ThreadArg* arg = new ThreadArg(this, tid);
    ASSERT_EQ(0, pthread_create(&thread, nullptr,
                                reinterpret_cast<void* (*)(void*)>(ReaderThreadFn), arg));
  }

 private:
  void InitRwlock(int kind_type) {
    pthread_rwlockattr_t attr;
    ASSERT_EQ(0, pthread_rwlockattr_init(&attr));
    ASSERT_EQ(0, pthread_rwlockattr_setkind_np(&attr, kind_type));
    ASSERT_EQ(0, pthread_rwlock_init(&lock, &attr));
    ASSERT_EQ(0, pthread_rwlockattr_destroy(&attr));
  }

  void DestroyRwlock() {
    ASSERT_EQ(0, pthread_rwlock_destroy(&lock));
  }

  static void WriterThreadFn(ThreadArg* arg) {
    arg->tid = gettid();

    RwlockKindTestHelper* helper = arg->helper;
    ASSERT_EQ(0, pthread_rwlock_wrlock(&helper->lock));
    ASSERT_EQ(0, pthread_rwlock_unlock(&helper->lock));
    delete arg;
  }

  static void ReaderThreadFn(ThreadArg* arg) {
    arg->tid = gettid();

    RwlockKindTestHelper* helper = arg->helper;
    ASSERT_EQ(0, pthread_rwlock_rdlock(&helper->lock));
    ASSERT_EQ(0, pthread_rwlock_unlock(&helper->lock));
    delete arg;
  }
};
#endif

TEST(pthread, pthread_rwlock_kind_PTHREAD_RWLOCK_PREFER_READER_NP) {
#if !defined(ANDROID_HOST_MUSL)
  RwlockKindTestHelper helper(PTHREAD_RWLOCK_PREFER_READER_NP);
  ASSERT_EQ(0, pthread_rwlock_rdlock(&helper.lock));

  pthread_t writer_thread;
  std::atomic<pid_t> writer_tid;
  helper.CreateWriterThread(writer_thread, writer_tid);
  WaitUntilThreadSleep(writer_tid);

  pthread_t reader_thread;
  std::atomic<pid_t> reader_tid;
  helper.CreateReaderThread(reader_thread, reader_tid);
  ASSERT_EQ(0, pthread_join(reader_thread, nullptr));

  ASSERT_EQ(0, pthread_rwlock_unlock(&helper.lock));
  ASSERT_EQ(0, pthread_join(writer_thread, nullptr));
#else
  GTEST_SKIP() << "musl doesn't have pthread_rwlockattr_setkind_np";
#endif
}

TEST(pthread, pthread_rwlock_kind_PTHREAD_RWLOCK_PREFER_WRITER_NONRECURSIVE_NP) {
#if !defined(ANDROID_HOST_MUSL)
  RwlockKindTestHelper helper(PTHREAD_RWLOCK_PREFER_WRITER_NONRECURSIVE_NP);
  ASSERT_EQ(0, pthread_rwlock_rdlock(&helper.lock));

  pthread_t writer_thread;
  std::atomic<pid_t> writer_tid;
  helper.CreateWriterThread(writer_thread, writer_tid);
  WaitUntilThreadSleep(writer_tid);

  pthread_t reader_thread;
  std::atomic<pid_t> reader_tid;
  helper.CreateReaderThread(reader_thread, reader_tid);
  WaitUntilThreadSleep(reader_tid);

  ASSERT_EQ(0, pthread_rwlock_unlock(&helper.lock));
  ASSERT_EQ(0, pthread_join(writer_thread, nullptr));
  ASSERT_EQ(0, pthread_join(reader_thread, nullptr));
#else
  GTEST_SKIP() << "musl doesn't have pthread_rwlockattr_setkind_np";
#endif
}

static int g_once_fn_call_count = 0;
static void OnceFn() {
  ++g_once_fn_call_count;
}

TEST(pthread, pthread_once_smoke) {
  pthread_once_t once_control = PTHREAD_ONCE_INIT;
  ASSERT_EQ(0, pthread_once(&once_control, OnceFn));
  ASSERT_EQ(0, pthread_once(&once_control, OnceFn));
  ASSERT_EQ(1, g_once_fn_call_count);
}

static std::string pthread_once_1934122_result = "";

static void Routine2() {
  pthread_once_1934122_result += "2";
}

static void Routine1() {
  pthread_once_t once_control_2 = PTHREAD_ONCE_INIT;
  pthread_once_1934122_result += "1";
  pthread_once(&once_control_2, &Routine2);
}

TEST(pthread, pthread_once_1934122) {
  // Very old versions of Android couldn't call pthread_once from a
  // pthread_once init routine. http://b/1934122.
  pthread_once_t once_control_1 = PTHREAD_ONCE_INIT;
  ASSERT_EQ(0, pthread_once(&once_control_1, &Routine1));
  ASSERT_EQ("12", pthread_once_1934122_result);
}

static int g_atfork_prepare_calls = 0;
static void AtForkPrepare1() { g_atfork_prepare_calls = (g_atfork_prepare_calls * 10) + 1; }
static void AtForkPrepare2() { g_atfork_prepare_calls = (g_atfork_prepare_calls * 10) + 2; }
static int g_atfork_parent_calls = 0;
static void AtForkParent1() { g_atfork_parent_calls = (g_atfork_parent_calls * 10) + 1; }
static void AtForkParent2() { g_atfork_parent_calls = (g_atfork_parent_calls * 10) + 2; }
static int g_atfork_child_calls = 0;
static void AtForkChild1() { g_atfork_child_calls = (g_atfork_child_calls * 10) + 1; }
static void AtForkChild2() { g_atfork_child_calls = (g_atfork_child_calls * 10) + 2; }

TEST(pthread, pthread_atfork_smoke_fork) {
  ASSERT_EQ(0, pthread_atfork(AtForkPrepare1, AtForkParent1, AtForkChild1));
  ASSERT_EQ(0, pthread_atfork(AtForkPrepare2, AtForkParent2, AtForkChild2));

  g_atfork_prepare_calls = g_atfork_parent_calls = g_atfork_child_calls = 0;
  pid_t pid = fork();
  ASSERT_NE(-1, pid) << strerror(errno);

  // Child and parent calls are made in the order they were registered.
  if (pid == 0) {
    ASSERT_EQ(12, g_atfork_child_calls);
    _exit(0);
  }
  ASSERT_EQ(12, g_atfork_parent_calls);

  // Prepare calls are made in the reverse order.
  ASSERT_EQ(21, g_atfork_prepare_calls);
  AssertChildExited(pid, 0);
}

TEST(pthread, pthread_atfork_smoke_vfork) {
  ASSERT_EQ(0, pthread_atfork(AtForkPrepare1, AtForkParent1, AtForkChild1));
  ASSERT_EQ(0, pthread_atfork(AtForkPrepare2, AtForkParent2, AtForkChild2));

  g_atfork_prepare_calls = g_atfork_parent_calls = g_atfork_child_calls = 0;
  pid_t pid = vfork();
  ASSERT_NE(-1, pid) << strerror(errno);

  // atfork handlers are not called.
  if (pid == 0) {
    ASSERT_EQ(0, g_atfork_child_calls);
    _exit(0);
  }
  ASSERT_EQ(0, g_atfork_parent_calls);
  ASSERT_EQ(0, g_atfork_prepare_calls);
  AssertChildExited(pid, 0);
}

TEST(pthread, pthread_atfork_smoke__Fork) {
#if defined(__BIONIC__)
  ASSERT_EQ(0, pthread_atfork(AtForkPrepare1, AtForkParent1, AtForkChild1));
  ASSERT_EQ(0, pthread_atfork(AtForkPrepare2, AtForkParent2, AtForkChild2));

  g_atfork_prepare_calls = g_atfork_parent_calls = g_atfork_child_calls = 0;
  pid_t pid = _Fork();
  ASSERT_NE(-1, pid) << strerror(errno);

  // atfork handlers are not called.
  if (pid == 0) {
    ASSERT_EQ(0, g_atfork_child_calls);
    _exit(0);
  }
  ASSERT_EQ(0, g_atfork_parent_calls);
  ASSERT_EQ(0, g_atfork_prepare_calls);
  AssertChildExited(pid, 0);
#endif
}

TEST(pthread, pthread_attr_getscope) {
  pthread_attr_t attr;
  ASSERT_EQ(0, pthread_attr_init(&attr));

  int scope;
  ASSERT_EQ(0, pthread_attr_getscope(&attr, &scope));
  ASSERT_EQ(PTHREAD_SCOPE_SYSTEM, scope);
}

TEST(pthread, pthread_condattr_init) {
  pthread_condattr_t attr;
  pthread_condattr_init(&attr);

  clockid_t clock;
  ASSERT_EQ(0, pthread_condattr_getclock(&attr, &clock));
  ASSERT_EQ(CLOCK_REALTIME, clock);

  int pshared;
  ASSERT_EQ(0, pthread_condattr_getpshared(&attr, &pshared));
  ASSERT_EQ(PTHREAD_PROCESS_PRIVATE, pshared);
}

TEST(pthread, pthread_condattr_setclock) {
  pthread_condattr_t attr;
  pthread_condattr_init(&attr);

  ASSERT_EQ(0, pthread_condattr_setclock(&attr, CLOCK_REALTIME));
  clockid_t clock;
  ASSERT_EQ(0, pthread_condattr_getclock(&attr, &clock));
  ASSERT_EQ(CLOCK_REALTIME, clock);

  ASSERT_EQ(0, pthread_condattr_setclock(&attr, CLOCK_MONOTONIC));
  ASSERT_EQ(0, pthread_condattr_getclock(&attr, &clock));
  ASSERT_EQ(CLOCK_MONOTONIC, clock);

  ASSERT_EQ(EINVAL, pthread_condattr_setclock(&attr, CLOCK_PROCESS_CPUTIME_ID));
}

TEST(pthread, pthread_cond_broadcast__preserves_condattr_flags) {
#if defined(__BIONIC__)
  pthread_condattr_t attr;
  pthread_condattr_init(&attr);

  ASSERT_EQ(0, pthread_condattr_setclock(&attr, CLOCK_MONOTONIC));
  ASSERT_EQ(0, pthread_condattr_setpshared(&attr, PTHREAD_PROCESS_SHARED));

  pthread_cond_t cond_var;
  ASSERT_EQ(0, pthread_cond_init(&cond_var, &attr));

  ASSERT_EQ(0, pthread_cond_signal(&cond_var));
  ASSERT_EQ(0, pthread_cond_broadcast(&cond_var));

  attr = static_cast<pthread_condattr_t>(*reinterpret_cast<uint32_t*>(cond_var.__private));
  clockid_t clock;
  ASSERT_EQ(0, pthread_condattr_getclock(&attr, &clock));
  ASSERT_EQ(CLOCK_MONOTONIC, clock);
  int pshared;
  ASSERT_EQ(0, pthread_condattr_getpshared(&attr, &pshared));
  ASSERT_EQ(PTHREAD_PROCESS_SHARED, pshared);
#else // !defined(__BIONIC__)
  GTEST_SKIP() << "bionic-only test";
#endif // !defined(__BIONIC__)
}

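// Fixture for condition-variable wakeup tests: a second thread blocks in
// wait_function while the main thread signals or broadcasts; TearDown checks
// that the waiter actually woke up and finished.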
class pthread_CondWakeupTest : public ::testing::Test {
 protected:
  pthread_mutex_t mutex;
  pthread_cond_t cond;

  enum Progress {
    INITIALIZED,
    WAITING,
    SIGNALED,
    FINISHED,
  };
  std::atomic<Progress> progress;
  pthread_t thread;
  timespec ts;
  std::function<int (pthread_cond_t* cond, pthread_mutex_t* mutex)> wait_function;

 protected:
  void SetUp() override {
    ASSERT_EQ(0, pthread_mutex_init(&mutex, nullptr));
  }

  void InitCond(clockid_t clock=CLOCK_REALTIME) {
    pthread_condattr_t attr;
    ASSERT_EQ(0, pthread_condattr_init(&attr));
    ASSERT_EQ(0, pthread_condattr_setclock(&attr, clock));
    ASSERT_EQ(0, pthread_cond_init(&cond, &attr));
    ASSERT_EQ(0, pthread_condattr_destroy(&attr));
  }

  void StartWaitingThread(
      std::function<int(pthread_cond_t* cond, pthread_mutex_t* mutex)> wait_function) {
    progress = INITIALIZED;
    this->wait_function = wait_function;
    ASSERT_EQ(0, pthread_create(&thread, nullptr, reinterpret_cast<void* (*)(void*)>(WaitThreadFn),
                                this));
    while (progress != WAITING) {
      usleep(5000);
    }
    usleep(5000);
  }

  void RunTimedTest(
      clockid_t clock,
      std::function<int(pthread_cond_t* cond, pthread_mutex_t* mutex, const timespec* timeout)>
          wait_function) {
    ASSERT_EQ(0, clock_gettime(clock, &ts));
    ts.tv_sec += 1;

    StartWaitingThread([&wait_function, this](pthread_cond_t* cond, pthread_mutex_t* mutex) {
      return wait_function(cond, mutex, &ts);
    });

    progress = SIGNALED;
    ASSERT_EQ(0, pthread_cond_signal(&cond));
  }

  void RunTimedTest(clockid_t clock, std::function<int(pthread_cond_t* cond, pthread_mutex_t* mutex,
                                                       clockid_t clock, const timespec* timeout)>
                                         wait_function) {
    RunTimedTest(clock, [clock, &wait_function](pthread_cond_t* cond, pthread_mutex_t* mutex,
                                                const timespec* timeout) {
      return wait_function(cond, mutex, clock, timeout);
    });
  }

  void TearDown() override {
    ASSERT_EQ(0, pthread_join(thread, nullptr));
    ASSERT_EQ(FINISHED, progress);
    ASSERT_EQ(0, pthread_cond_destroy(&cond));
    ASSERT_EQ(0, pthread_mutex_destroy(&mutex));
  }

 private:
  static void WaitThreadFn(pthread_CondWakeupTest* test) {
    ASSERT_EQ(0, pthread_mutex_lock(&test->mutex));
    test->progress = WAITING;
    while (test->progress == WAITING) {
      ASSERT_EQ(0, test->wait_function(&test->cond, &test->mutex));
    }
    ASSERT_EQ(SIGNALED, test->progress);
    test->progress = FINISHED;
    ASSERT_EQ(0, pthread_mutex_unlock(&test->mutex));
  }
};

TEST_F(pthread_CondWakeupTest, signal_wait) {
  InitCond();
  StartWaitingThread([](pthread_cond_t* cond, pthread_mutex_t* mutex) {
    return pthread_cond_wait(cond, mutex);
  });
  progress = SIGNALED;
  ASSERT_EQ(0, pthread_cond_signal(&cond));
}

TEST_F(pthread_CondWakeupTest, broadcast_wait) {
  InitCond();
  StartWaitingThread([](pthread_cond_t* cond, pthread_mutex_t* mutex) {
    return pthread_cond_wait(cond, mutex);
  });
  progress = SIGNALED;
  ASSERT_EQ(0, pthread_cond_broadcast(&cond));
}

TEST_F(pthread_CondWakeupTest, signal_timedwait_CLOCK_REALTIME) {
  InitCond(CLOCK_REALTIME);
  RunTimedTest(CLOCK_REALTIME, pthread_cond_timedwait);
}

TEST_F(pthread_CondWakeupTest, signal_timedwait_CLOCK_MONOTONIC) {
  InitCond(CLOCK_MONOTONIC);
  RunTimedTest(CLOCK_MONOTONIC, pthread_cond_timedwait);
}

TEST_F(pthread_CondWakeupTest, signal_timedwait_CLOCK_MONOTONIC_np) {
#if defined(__BIONIC__)
  InitCond(CLOCK_REALTIME);
  RunTimedTest(CLOCK_MONOTONIC, pthread_cond_timedwait_monotonic_np);
#else // __BIONIC__
  GTEST_SKIP() << "pthread_cond_timedwait_monotonic_np not available";
#endif // __BIONIC__
}

TEST_F(pthread_CondWakeupTest, signal_clockwait_monotonic_monotonic) {
#if defined(__BIONIC__)
  InitCond(CLOCK_MONOTONIC);
  RunTimedTest(CLOCK_MONOTONIC, pthread_cond_clockwait);
#else // __BIONIC__
  GTEST_SKIP() << "pthread_cond_clockwait not available";
#endif // __BIONIC__
}

TEST_F(pthread_CondWakeupTest, signal_clockwait_monotonic_realtime) {
#if defined(__BIONIC__)
  InitCond(CLOCK_MONOTONIC);
  RunTimedTest(CLOCK_REALTIME, pthread_cond_clockwait);
#else // __BIONIC__
  GTEST_SKIP() << "pthread_cond_clockwait not available";
#endif // __BIONIC__
}

TEST_F(pthread_CondWakeupTest, signal_clockwait_realtime_monotonic) {
#if defined(__BIONIC__)
  InitCond(CLOCK_REALTIME);
  RunTimedTest(CLOCK_MONOTONIC, pthread_cond_clockwait);
#else // __BIONIC__
  GTEST_SKIP() << "pthread_cond_clockwait not available";
#endif // __BIONIC__
}

TEST_F(pthread_CondWakeupTest, signal_clockwait_realtime_realtime) {
#if defined(__BIONIC__)
  InitCond(CLOCK_REALTIME);
  RunTimedTest(CLOCK_REALTIME, pthread_cond_clockwait);
#else // __BIONIC__
  GTEST_SKIP() << "pthread_cond_clockwait not available";
#endif // __BIONIC__
}

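// Checks the timeout/EINVAL behavior of a timed wait function with no signaler:
// a timeout in the past must return ETIMEDOUT, and out-of-range tv_nsec values
// must return EINVAL.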
static void pthread_cond_timedwait_timeout_helper(bool init_monotonic, clockid_t clock,
                                                  int (*wait_function)(pthread_cond_t* __cond,
                                                                       pthread_mutex_t* __mutex,
                                                                       const timespec* __timeout)) {
  pthread_mutex_t mutex;
  ASSERT_EQ(0, pthread_mutex_init(&mutex, nullptr));
  pthread_cond_t cond;

  if (init_monotonic) {
    pthread_condattr_t attr;
    pthread_condattr_init(&attr);

    ASSERT_EQ(0, pthread_condattr_setclock(&attr, CLOCK_MONOTONIC));
    clockid_t clock;
    ASSERT_EQ(0, pthread_condattr_getclock(&attr, &clock));
    ASSERT_EQ(CLOCK_MONOTONIC, clock);

    ASSERT_EQ(0, pthread_cond_init(&cond, &attr));
  } else {
    ASSERT_EQ(0, pthread_cond_init(&cond, nullptr));
  }
  ASSERT_EQ(0, pthread_mutex_lock(&mutex));

  timespec ts;
  ASSERT_EQ(0, clock_gettime(clock, &ts));
  ASSERT_EQ(ETIMEDOUT, wait_function(&cond, &mutex, &ts));
  ts.tv_nsec = -1;
  ASSERT_EQ(EINVAL, wait_function(&cond, &mutex, &ts));
  ts.tv_nsec = NS_PER_S;
  ASSERT_EQ(EINVAL, wait_function(&cond, &mutex, &ts));
  ts.tv_nsec = NS_PER_S - 1;
  ts.tv_sec = -1;
  ASSERT_EQ(ETIMEDOUT, wait_function(&cond, &mutex, &ts));
  ASSERT_EQ(0, pthread_mutex_unlock(&mutex));
}

TEST(pthread, pthread_cond_timedwait_timeout) {
  pthread_cond_timedwait_timeout_helper(false, CLOCK_REALTIME, pthread_cond_timedwait);
}

TEST(pthread, pthread_cond_timedwait_monotonic_np_timeout) {
#if defined(__BIONIC__)
  pthread_cond_timedwait_timeout_helper(false, CLOCK_MONOTONIC, pthread_cond_timedwait_monotonic_np);
  pthread_cond_timedwait_timeout_helper(true, CLOCK_MONOTONIC, pthread_cond_timedwait_monotonic_np);
#else // __BIONIC__
  GTEST_SKIP() << "pthread_cond_timedwait_monotonic_np not available";
#endif // __BIONIC__
}

TEST(pthread, pthread_cond_clockwait_timeout) {
#if defined(__BIONIC__)
  pthread_cond_timedwait_timeout_helper(
      false, CLOCK_MONOTONIC,
      [](pthread_cond_t* __cond, pthread_mutex_t* __mutex, const timespec* __timeout) {
        return pthread_cond_clockwait(__cond, __mutex, CLOCK_MONOTONIC, __timeout);
      });
  pthread_cond_timedwait_timeout_helper(
      true, CLOCK_MONOTONIC,
      [](pthread_cond_t* __cond, pthread_mutex_t* __mutex, const timespec* __timeout) {
        return pthread_cond_clockwait(__cond, __mutex, CLOCK_MONOTONIC, __timeout);
      });
  pthread_cond_timedwait_timeout_helper(
      false, CLOCK_REALTIME,
      [](pthread_cond_t* __cond, pthread_mutex_t* __mutex, const timespec* __timeout) {
        return pthread_cond_clockwait(__cond, __mutex, CLOCK_REALTIME, __timeout);
      });
  pthread_cond_timedwait_timeout_helper(
      true, CLOCK_REALTIME,
      [](pthread_cond_t* __cond, pthread_mutex_t* __mutex, const timespec* __timeout) {
        return pthread_cond_clockwait(__cond, __mutex, CLOCK_REALTIME, __timeout);
      });
#else // __BIONIC__
  GTEST_SKIP() << "pthread_cond_clockwait not available";
#endif // __BIONIC__
}

TEST(pthread, pthread_cond_clockwait_invalid) {
#if defined(__BIONIC__)
  pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
  pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER;
  timespec ts;
  EXPECT_EQ(EINVAL, pthread_cond_clockwait(&cond, &mutex, CLOCK_PROCESS_CPUTIME_ID, &ts));

#else // __BIONIC__
  GTEST_SKIP() << "pthread_cond_clockwait not available";
#endif // __BIONIC__
}

TEST(pthread, pthread_attr_getstack__main_thread) {
  // This test is only meaningful for the main thread, so make sure we're running on it!
  ASSERT_EQ(getpid(), syscall(__NR_gettid));

  // Get the main thread's attributes.
  pthread_attr_t attributes;
  ASSERT_EQ(0, pthread_getattr_np(pthread_self(), &attributes));

  // Check that we correctly report that the main thread has no guard page.
  size_t guard_size;
  ASSERT_EQ(0, pthread_attr_getguardsize(&attributes, &guard_size));
  ASSERT_EQ(0U, guard_size); // The main thread has no guard page.

  // Get the stack base and the stack size (both ways).
  void* stack_base;
  size_t stack_size;
  ASSERT_EQ(0, pthread_attr_getstack(&attributes, &stack_base, &stack_size));
  size_t stack_size2;
  ASSERT_EQ(0, pthread_attr_getstacksize(&attributes, &stack_size2));

  // The two methods of asking for the stack size should agree.
  EXPECT_EQ(stack_size, stack_size2);

#if defined(__BIONIC__)
  // Find the stack in /proc/self/maps using a pointer to the stack.
  //
  // We do not use the "[stack]" label because in a native-bridge environment it is not
  // guaranteed to point to the right stack. A native bridge implementation may
  // keep a separate stack for the guest code.
  void* maps_stack_hi = nullptr;
  std::vector<map_record> maps;
  ASSERT_TRUE(Maps::parse_maps(&maps));
  uintptr_t stack_address = reinterpret_cast<uintptr_t>(untag_address(&maps_stack_hi));
  for (const auto& map : maps) {
    if (map.addr_start <= stack_address && map.addr_end > stack_address) {
      maps_stack_hi = reinterpret_cast<void*>(map.addr_end);
      break;
    }
  }

  // The high address of the /proc/self/maps stack region should equal stack_base + stack_size.
  // Remember that the stack grows down (and is mapped in on demand), so the low address of the
  // region isn't very interesting.
  EXPECT_EQ(maps_stack_hi, reinterpret_cast<uint8_t*>(stack_base) + stack_size);

  // The stack size should correspond to RLIMIT_STACK.
  rlimit rl;
  ASSERT_EQ(0, getrlimit(RLIMIT_STACK, &rl));
  uint64_t original_rlim_cur = rl.rlim_cur;
  if (rl.rlim_cur == RLIM_INFINITY) {
    rl.rlim_cur = 8 * 1024 * 1024; // Bionic reports unlimited stacks as 8MiB.
  }
  EXPECT_EQ(rl.rlim_cur, stack_size);

  auto guard = android::base::make_scope_guard([&rl, original_rlim_cur]() {
    rl.rlim_cur = original_rlim_cur;
    ASSERT_EQ(0, setrlimit(RLIMIT_STACK, &rl));
  });

  //
  // What if RLIMIT_STACK is smaller than the stack's current extent?
  //
  rl.rlim_cur = 1024; // 1KiB. We know the stack must be at least a page already.
  rl.rlim_max = RLIM_INFINITY;
  ASSERT_EQ(0, setrlimit(RLIMIT_STACK, &rl));

  ASSERT_EQ(0, pthread_getattr_np(pthread_self(), &attributes));
  ASSERT_EQ(0, pthread_attr_getstack(&attributes, &stack_base, &stack_size));
  ASSERT_EQ(0, pthread_attr_getstacksize(&attributes, &stack_size2));

  EXPECT_EQ(stack_size, stack_size2);
  ASSERT_EQ(1024U, stack_size);

  //
  // What if RLIMIT_STACK isn't a whole number of pages?
  //
  rl.rlim_cur = 6666; // Not a whole number of pages.
  rl.rlim_max = RLIM_INFINITY;
  ASSERT_EQ(0, setrlimit(RLIMIT_STACK, &rl));

  ASSERT_EQ(0, pthread_getattr_np(pthread_self(), &attributes));
  ASSERT_EQ(0, pthread_attr_getstack(&attributes, &stack_base, &stack_size));
  ASSERT_EQ(0, pthread_attr_getstacksize(&attributes, &stack_size2));

  EXPECT_EQ(stack_size, stack_size2);
  ASSERT_EQ(6666U, stack_size);
#endif
}

struct GetStackSignalHandlerArg {
  volatile bool done;
  void* signal_stack_base;
  size_t signal_stack_size;
  void* main_stack_base;
  size_t main_stack_size;
};

static GetStackSignalHandlerArg getstack_signal_handler_arg;

static void getstack_signal_handler(int sig) {
  ASSERT_EQ(SIGUSR1, sig);
  // Use sleep() to get the current thread switched out by the kernel, provoking the
  // mislabeling error described below.
  sleep(1);
  pthread_attr_t attr;
  ASSERT_EQ(0, pthread_getattr_np(pthread_self(), &attr));
  void* stack_base;
  size_t stack_size;
  ASSERT_EQ(0, pthread_attr_getstack(&attr, &stack_base, &stack_size));

  // Verify that the stack used by the signal handler is the alternate stack just registered.
  ASSERT_LE(getstack_signal_handler_arg.signal_stack_base, &attr);
  ASSERT_LT(static_cast<void*>(untag_address(&attr)),
            static_cast<char*>(getstack_signal_handler_arg.signal_stack_base) +
                getstack_signal_handler_arg.signal_stack_size);

  // Verify that the main thread's stack, as obtained in the signal handler, is correct.
  ASSERT_EQ(getstack_signal_handler_arg.main_stack_base, stack_base);
  ASSERT_LE(getstack_signal_handler_arg.main_stack_size, stack_size);

  getstack_signal_handler_arg.done = true;
}

// The previous code obtained the main thread's stack by reading the entry in
// /proc/self/task/<pid>/maps that was labeled [stack]. Unfortunately, on x86/x86_64, the kernel
// relies on sp0 in the task state segment (tss) to label the stack map with [stack]. If the kernel
// switches a process while the main thread is in an alternate stack, then the kernel will label
// the wrong map with [stack]. This test verifies that when the above situation happens, the main
// thread's stack is found correctly.
TEST(pthread, pthread_attr_getstack_in_signal_handler) {
  // This test is only meaningful for the main thread, so make sure we're running on it!
  ASSERT_EQ(getpid(), syscall(__NR_gettid));

  const size_t sig_stack_size = 16 * 1024;
  void* sig_stack = mmap(nullptr, sig_stack_size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS,
                         -1, 0);
  ASSERT_NE(MAP_FAILED, sig_stack);
  stack_t ss;
  ss.ss_sp = sig_stack;
  ss.ss_size = sig_stack_size;
  ss.ss_flags = 0;
  stack_t oss;
  ASSERT_EQ(0, sigaltstack(&ss, &oss));

  pthread_attr_t attr;
  ASSERT_EQ(0, pthread_getattr_np(pthread_self(), &attr));
  void* main_stack_base;
  size_t main_stack_size;
  ASSERT_EQ(0, pthread_attr_getstack(&attr, &main_stack_base, &main_stack_size));

  ScopedSignalHandler handler(SIGUSR1, getstack_signal_handler, SA_ONSTACK);
  getstack_signal_handler_arg.done = false;
  getstack_signal_handler_arg.signal_stack_base = sig_stack;
  getstack_signal_handler_arg.signal_stack_size = sig_stack_size;
  getstack_signal_handler_arg.main_stack_base = main_stack_base;
  getstack_signal_handler_arg.main_stack_size = main_stack_size;
  kill(getpid(), SIGUSR1);
  ASSERT_EQ(true, getstack_signal_handler_arg.done);

  ASSERT_EQ(0, sigaltstack(&oss, nullptr));
  ASSERT_EQ(0, munmap(sig_stack, sig_stack_size));
}

static void pthread_attr_getstack_18908062_helper(void*) {
  char local_variable;
  pthread_attr_t attributes;
  pthread_getattr_np(pthread_self(), &attributes);
  void* stack_base;
  size_t stack_size;
  pthread_attr_getstack(&attributes, &stack_base, &stack_size);

  // Test whether &local_variable is in [stack_base, stack_base + stack_size).
  ASSERT_LE(reinterpret_cast<char*>(stack_base), &local_variable);
  ASSERT_LT(untag_address(&local_variable), reinterpret_cast<char*>(stack_base) + stack_size);
}

// Check whether something on the stack is in the range of
// [stack_base, stack_base + stack_size). See b/18908062.
TEST(pthread, pthread_attr_getstack_18908062) {
  pthread_t t;
  ASSERT_EQ(0, pthread_create(&t, nullptr,
      reinterpret_cast<void* (*)(void*)>(pthread_attr_getstack_18908062_helper),
      nullptr));
  ASSERT_EQ(0, pthread_join(t, nullptr));
}

#if defined(__BIONIC__)
static pthread_mutex_t pthread_gettid_np_mutex = PTHREAD_MUTEX_INITIALIZER;

static void* pthread_gettid_np_helper(void* arg) {
  *reinterpret_cast<pid_t*>(arg) = gettid();

  // Wait for our parent to call pthread_gettid_np on us before exiting.
  pthread_mutex_lock(&pthread_gettid_np_mutex);
  pthread_mutex_unlock(&pthread_gettid_np_mutex);
  return nullptr;
}
#endif

TEST(pthread, pthread_gettid_np) {
#if defined(__BIONIC__)
  ASSERT_EQ(gettid(), pthread_gettid_np(pthread_self()));

  // Ensure the other thread doesn't exit until after we've called
  // pthread_gettid_np on it.
  pthread_mutex_lock(&pthread_gettid_np_mutex);

  pid_t t_gettid_result;
  pthread_t t;
  pthread_create(&t, nullptr, pthread_gettid_np_helper, &t_gettid_result);

  pid_t t_pthread_gettid_np_result = pthread_gettid_np(t);

  // Release the other thread and wait for it to exit.
  pthread_mutex_unlock(&pthread_gettid_np_mutex);
  ASSERT_EQ(0, pthread_join(t, nullptr));

  ASSERT_EQ(t_gettid_result, t_pthread_gettid_np_result);
#else
  GTEST_SKIP() << "pthread_gettid_np not available";
#endif
}

static size_t cleanup_counter = 0;

static void AbortCleanupRoutine(void*) {
  abort();
}

static void CountCleanupRoutine(void*) {
  ++cleanup_counter;
}

static void PthreadCleanupTester() {
  pthread_cleanup_push(CountCleanupRoutine, nullptr);
  pthread_cleanup_push(CountCleanupRoutine, nullptr);
  pthread_cleanup_push(AbortCleanupRoutine, nullptr);

  pthread_cleanup_pop(0); // Pop the abort without executing it.
  pthread_cleanup_pop(1); // Pop one count while executing it.
  ASSERT_EQ(1U, cleanup_counter);
  // Exit while the other count is still on the cleanup stack.
  pthread_exit(nullptr);

  // Calls to pthread_cleanup_pop/pthread_cleanup_push must always be balanced.
  pthread_cleanup_pop(0);
}

static void* PthreadCleanupStartRoutine(void*) {
  PthreadCleanupTester();
  return nullptr;
}

TEST(pthread, pthread_cleanup_push__pthread_cleanup_pop) {
  pthread_t t;
  ASSERT_EQ(0, pthread_create(&t, nullptr, PthreadCleanupStartRoutine, nullptr));
  ASSERT_EQ(0, pthread_join(t, nullptr));
  ASSERT_EQ(2U, cleanup_counter);
}

TEST(pthread, PTHREAD_MUTEX_DEFAULT_is_PTHREAD_MUTEX_NORMAL) {
  ASSERT_EQ(PTHREAD_MUTEX_NORMAL, PTHREAD_MUTEX_DEFAULT);
}

TEST(pthread, pthread_mutexattr_gettype) {
  pthread_mutexattr_t attr;
  ASSERT_EQ(0, pthread_mutexattr_init(&attr));

  int attr_type;

  ASSERT_EQ(0, pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_NORMAL));
  ASSERT_EQ(0, pthread_mutexattr_gettype(&attr, &attr_type));
  ASSERT_EQ(PTHREAD_MUTEX_NORMAL, attr_type);

  ASSERT_EQ(0, pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ERRORCHECK));
  ASSERT_EQ(0, pthread_mutexattr_gettype(&attr, &attr_type));
  ASSERT_EQ(PTHREAD_MUTEX_ERRORCHECK, attr_type);

  ASSERT_EQ(0, pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE));
  ASSERT_EQ(0, pthread_mutexattr_gettype(&attr, &attr_type));
  ASSERT_EQ(PTHREAD_MUTEX_RECURSIVE, attr_type);

  ASSERT_EQ(0, pthread_mutexattr_destroy(&attr));
}

TEST(pthread, pthread_mutexattr_protocol) {
  pthread_mutexattr_t attr;
  ASSERT_EQ(0, pthread_mutexattr_init(&attr));

  int protocol;
  ASSERT_EQ(0, pthread_mutexattr_getprotocol(&attr, &protocol));
  ASSERT_EQ(PTHREAD_PRIO_NONE, protocol);
  for (size_t repeat = 0; repeat < 2; ++repeat) {
    for (int set_protocol : {PTHREAD_PRIO_NONE, PTHREAD_PRIO_INHERIT}) {
      ASSERT_EQ(0, pthread_mutexattr_setprotocol(&attr, set_protocol));
      ASSERT_EQ(0, pthread_mutexattr_getprotocol(&attr, &protocol));
      ASSERT_EQ(protocol, set_protocol);
    }
  }
}

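// RAII wrapper that creates a pthread_mutex_t with the given type and protocol,
// and destroys it when it goes out of scope.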
struct PthreadMutex {
  pthread_mutex_t lock;

  explicit PthreadMutex(int mutex_type, int protocol = PTHREAD_PRIO_NONE) {
    init(mutex_type, protocol);
  }

  ~PthreadMutex() {
    destroy();
  }

 private:
  void init(int mutex_type, int protocol) {
    pthread_mutexattr_t attr;
    ASSERT_EQ(0, pthread_mutexattr_init(&attr));
    ASSERT_EQ(0, pthread_mutexattr_settype(&attr, mutex_type));
    ASSERT_EQ(0, pthread_mutexattr_setprotocol(&attr, protocol));
    ASSERT_EQ(0, pthread_mutex_init(&lock, &attr));
    ASSERT_EQ(0, pthread_mutexattr_destroy(&attr));
  }

  void destroy() {
    ASSERT_EQ(0, pthread_mutex_destroy(&lock));
  }

  DISALLOW_COPY_AND_ASSIGN(PthreadMutex);
};

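// Attempts to unlock the mutex from a thread that doesn't own it, returning the
// pthread_mutex_unlock() result (EPERM is expected for errorcheck, recursive,
// and priority-inheritance mutexes).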
static int UnlockFromAnotherThread(pthread_mutex_t* mutex) {
  pthread_t thread;
  pthread_create(&thread, nullptr, [](void* mutex_voidp) -> void* {
    pthread_mutex_t* mutex = static_cast<pthread_mutex_t*>(mutex_voidp);
    intptr_t result = pthread_mutex_unlock(mutex);
    return reinterpret_cast<void*>(result);
  }, mutex);
  void* result;
  EXPECT_EQ(0, pthread_join(thread, &result));
  return reinterpret_cast<intptr_t>(result);
}

static void TestPthreadMutexLockNormal(int protocol) {
  PthreadMutex m(PTHREAD_MUTEX_NORMAL, protocol);

  ASSERT_EQ(0, pthread_mutex_lock(&m.lock));
  if (protocol == PTHREAD_PRIO_INHERIT) {
    ASSERT_EQ(EPERM, UnlockFromAnotherThread(&m.lock));
  }
  ASSERT_EQ(0, pthread_mutex_unlock(&m.lock));
  ASSERT_EQ(0, pthread_mutex_trylock(&m.lock));
  ASSERT_EQ(EBUSY, pthread_mutex_trylock(&m.lock));
  ASSERT_EQ(0, pthread_mutex_unlock(&m.lock));
}

static void TestPthreadMutexLockErrorCheck(int protocol) {
  PthreadMutex m(PTHREAD_MUTEX_ERRORCHECK, protocol);

  ASSERT_EQ(0, pthread_mutex_lock(&m.lock));
  ASSERT_EQ(EPERM, UnlockFromAnotherThread(&m.lock));
  ASSERT_EQ(EDEADLK, pthread_mutex_lock(&m.lock));
  ASSERT_EQ(0, pthread_mutex_unlock(&m.lock));
  ASSERT_EQ(0, pthread_mutex_trylock(&m.lock));
  if (protocol == PTHREAD_PRIO_NONE) {
    ASSERT_EQ(EBUSY, pthread_mutex_trylock(&m.lock));
  } else {
    ASSERT_EQ(EDEADLK, pthread_mutex_trylock(&m.lock));
  }
  ASSERT_EQ(0, pthread_mutex_unlock(&m.lock));
  ASSERT_EQ(EPERM, pthread_mutex_unlock(&m.lock));
}

static void TestPthreadMutexLockRecursive(int protocol) {
  PthreadMutex m(PTHREAD_MUTEX_RECURSIVE, protocol);

  ASSERT_EQ(0, pthread_mutex_lock(&m.lock));
  ASSERT_EQ(EPERM, UnlockFromAnotherThread(&m.lock));
  ASSERT_EQ(0, pthread_mutex_lock(&m.lock));
  ASSERT_EQ(EPERM, UnlockFromAnotherThread(&m.lock));
  ASSERT_EQ(0, pthread_mutex_unlock(&m.lock));
  ASSERT_EQ(0, pthread_mutex_unlock(&m.lock));
  ASSERT_EQ(0, pthread_mutex_trylock(&m.lock));
  ASSERT_EQ(0, pthread_mutex_trylock(&m.lock));
  ASSERT_EQ(0, pthread_mutex_unlock(&m.lock));
  ASSERT_EQ(0, pthread_mutex_unlock(&m.lock));
  ASSERT_EQ(EPERM, pthread_mutex_unlock(&m.lock));
}

TEST(pthread, pthread_mutex_lock_NORMAL) {
  TestPthreadMutexLockNormal(PTHREAD_PRIO_NONE);
}

TEST(pthread, pthread_mutex_lock_ERRORCHECK) {
  TestPthreadMutexLockErrorCheck(PTHREAD_PRIO_NONE);
}

TEST(pthread, pthread_mutex_lock_RECURSIVE) {
  TestPthreadMutexLockRecursive(PTHREAD_PRIO_NONE);
}

TEST(pthread, pthread_mutex_lock_pi) {
  TestPthreadMutexLockNormal(PTHREAD_PRIO_INHERIT);
  TestPthreadMutexLockErrorCheck(PTHREAD_PRIO_INHERIT);
  TestPthreadMutexLockRecursive(PTHREAD_PRIO_INHERIT);
}

TEST(pthread, pthread_mutex_pi_count_limit) {
#if defined(__BIONIC__) && !defined(__LP64__)
  // Bionic only supports 65536 pi mutexes in 32-bit programs.
  pthread_mutexattr_t attr;
  ASSERT_EQ(0, pthread_mutexattr_init(&attr));
  ASSERT_EQ(0, pthread_mutexattr_setprotocol(&attr, PTHREAD_PRIO_INHERIT));
  std::vector<pthread_mutex_t> mutexes(65536);
  // Test if we can use 65536 pi mutexes at the same time.
  // Run 2 times to check if freed pi mutexes can be recycled.
  for (int repeat = 0; repeat < 2; ++repeat) {
    for (auto& m : mutexes) {
      ASSERT_EQ(0, pthread_mutex_init(&m, &attr));
    }
    pthread_mutex_t m;
    ASSERT_EQ(ENOMEM, pthread_mutex_init(&m, &attr));
    for (auto& m : mutexes) {
      ASSERT_EQ(0, pthread_mutex_lock(&m));
    }
    for (auto& m : mutexes) {
      ASSERT_EQ(0, pthread_mutex_unlock(&m));
    }
    for (auto& m : mutexes) {
      ASSERT_EQ(0, pthread_mutex_destroy(&m));
    }
  }
  ASSERT_EQ(0, pthread_mutexattr_destroy(&attr));
#else
  GTEST_SKIP() << "pi mutex count not limited to 64Ki";
#endif
}

TEST(pthread, pthread_mutex_init_same_as_static_initializers) {
  pthread_mutex_t lock_normal = PTHREAD_MUTEX_INITIALIZER;
  PthreadMutex m1(PTHREAD_MUTEX_NORMAL);
  ASSERT_EQ(0, memcmp(&lock_normal, &m1.lock, sizeof(pthread_mutex_t)));
  pthread_mutex_destroy(&lock_normal);

#if !defined(ANDROID_HOST_MUSL)
  // musl doesn't support PTHREAD_ERRORCHECK_MUTEX_INITIALIZER_NP or
  // PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP.
  pthread_mutex_t lock_errorcheck = PTHREAD_ERRORCHECK_MUTEX_INITIALIZER_NP;
  PthreadMutex m2(PTHREAD_MUTEX_ERRORCHECK);
  ASSERT_EQ(0, memcmp(&lock_errorcheck, &m2.lock, sizeof(pthread_mutex_t)));
  pthread_mutex_destroy(&lock_errorcheck);

  pthread_mutex_t lock_recursive = PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP;
  PthreadMutex m3(PTHREAD_MUTEX_RECURSIVE);
  ASSERT_EQ(0, memcmp(&lock_recursive, &m3.lock, sizeof(pthread_mutex_t)));
  ASSERT_EQ(0, pthread_mutex_destroy(&lock_recursive));
#endif
}

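// Checks that unlocking a mutex of the given type wakes up another thread
// blocked on it in pthread_mutex_lock().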
// Checks that a thread blocked on a mutex of the given type is woken up
// when the owner unlocks it.
class MutexWakeupHelper {
 private:
  PthreadMutex m;
  enum Progress {
    LOCK_INITIALIZED,
    LOCK_WAITING,
    LOCK_RELEASED,
    LOCK_ACCESSED
  };
  std::atomic<Progress> progress;
  std::atomic<pid_t> tid;

  static void thread_fn(MutexWakeupHelper* helper) {
    helper->tid = gettid();
    ASSERT_EQ(LOCK_INITIALIZED, helper->progress);
    helper->progress = LOCK_WAITING;

    ASSERT_EQ(0, pthread_mutex_lock(&helper->m.lock));
    ASSERT_EQ(LOCK_RELEASED, helper->progress);
    ASSERT_EQ(0, pthread_mutex_unlock(&helper->m.lock));

    helper->progress = LOCK_ACCESSED;
  }

 public:
  explicit MutexWakeupHelper(int mutex_type) : m(mutex_type) {
  }

  void test() {
    ASSERT_EQ(0, pthread_mutex_lock(&m.lock));
    progress = LOCK_INITIALIZED;
    tid = 0;

    pthread_t thread;
    ASSERT_EQ(0, pthread_create(&thread, nullptr,
                                reinterpret_cast<void* (*)(void*)>(MutexWakeupHelper::thread_fn), this));

    WaitUntilThreadSleep(tid);
    ASSERT_EQ(LOCK_WAITING, progress);

    progress = LOCK_RELEASED;
    ASSERT_EQ(0, pthread_mutex_unlock(&m.lock));

    ASSERT_EQ(0, pthread_join(thread, nullptr));
    ASSERT_EQ(LOCK_ACCESSED, progress);
  }
};

TEST(pthread, pthread_mutex_NORMAL_wakeup) {
  MutexWakeupHelper helper(PTHREAD_MUTEX_NORMAL);
  helper.test();
}

TEST(pthread, pthread_mutex_ERRORCHECK_wakeup) {
  MutexWakeupHelper helper(PTHREAD_MUTEX_ERRORCHECK);
  helper.test();
}

TEST(pthread, pthread_mutex_RECURSIVE_wakeup) {
  MutexWakeupHelper helper(PTHREAD_MUTEX_RECURSIVE);
  helper.test();
}

static int GetThreadPriority(pid_t tid) {
  // sched_getparam() returns the static priority of a thread, which can't reflect a thread's
  // priority after priority inheritance, so read /proc/<tid>/stat to get the dynamic priority
  // instead.
  std::string filename = android::base::StringPrintf("/proc/%d/stat", tid);
  std::string content;
  int result = INT_MAX;
  if (!android::base::ReadFileToString(filename, &content)) {
    return result;
  }
  std::vector<std::string> strs = android::base::Split(content, " ");
  // The dynamic priority is field 18 of /proc/<tid>/stat (so index 17).
  if (strs.size() < 18) {
    return result;
  }
  if (!android::base::ParseInt(strs[17], &result)) {
    return INT_MAX;
  }
  return result;
}

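// For reference: for threads under a non-real-time policy, the "priority"
// field read above is the kernel's raw nice value, 20 + nice. That is why
// the PI tests below expect 20 for a nice-0 thread and 21 after
// setpriority(PRIO_PROCESS, tid, 1). A minimal sketch of the mapping
// (illustrative, assuming a SCHED_OTHER thread):
//   ASSERT_EQ(0, setpriority(PRIO_PROCESS, gettid(), 1));  // nice = 1
//   ASSERT_EQ(21, GetThreadPriority(gettid()));            // 20 + 1
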
class PIMutexWakeupHelper {
 private:
  PthreadMutex m;
  int protocol;
  enum Progress {
    LOCK_INITIALIZED,
    LOCK_CHILD_READY,
    LOCK_WAITING,
    LOCK_RELEASED,
  };
  std::atomic<Progress> progress;
  std::atomic<pid_t> main_tid;
  std::atomic<pid_t> child_tid;
  PthreadMutex start_thread_m;

  static void thread_fn(PIMutexWakeupHelper* helper) {
    helper->child_tid = gettid();
    ASSERT_EQ(LOCK_INITIALIZED, helper->progress);
    ASSERT_EQ(0, setpriority(PRIO_PROCESS, gettid(), 1));
    ASSERT_EQ(21, GetThreadPriority(gettid()));
    ASSERT_EQ(0, pthread_mutex_lock(&helper->m.lock));
    helper->progress = LOCK_CHILD_READY;
    ASSERT_EQ(0, pthread_mutex_lock(&helper->start_thread_m.lock));

    ASSERT_EQ(0, pthread_mutex_unlock(&helper->start_thread_m.lock));
    WaitUntilThreadSleep(helper->main_tid);
    ASSERT_EQ(LOCK_WAITING, helper->progress);

    if (helper->protocol == PTHREAD_PRIO_INHERIT) {
      ASSERT_EQ(20, GetThreadPriority(gettid()));
    } else {
      ASSERT_EQ(21, GetThreadPriority(gettid()));
    }
    helper->progress = LOCK_RELEASED;
    ASSERT_EQ(0, pthread_mutex_unlock(&helper->m.lock));
  }

 public:
  explicit PIMutexWakeupHelper(int mutex_type, int protocol)
      : m(mutex_type, protocol), protocol(protocol), start_thread_m(PTHREAD_MUTEX_NORMAL) {
  }

  void test() {
    ASSERT_EQ(0, pthread_mutex_lock(&start_thread_m.lock));
    main_tid = gettid();
    ASSERT_EQ(20, GetThreadPriority(main_tid));
    progress = LOCK_INITIALIZED;
    child_tid = 0;

    pthread_t thread;
    ASSERT_EQ(0, pthread_create(&thread, nullptr,
                                reinterpret_cast<void* (*)(void*)>(PIMutexWakeupHelper::thread_fn), this));

    WaitUntilThreadSleep(child_tid);
    ASSERT_EQ(LOCK_CHILD_READY, progress);
    ASSERT_EQ(0, pthread_mutex_unlock(&start_thread_m.lock));
    progress = LOCK_WAITING;
    ASSERT_EQ(0, pthread_mutex_lock(&m.lock));

    ASSERT_EQ(LOCK_RELEASED, progress);
    ASSERT_EQ(0, pthread_mutex_unlock(&m.lock));
    ASSERT_EQ(0, pthread_join(thread, nullptr));
  }
};

TEST(pthread, pthread_mutex_pi_wakeup) {
  for (int type : {PTHREAD_MUTEX_NORMAL, PTHREAD_MUTEX_RECURSIVE, PTHREAD_MUTEX_ERRORCHECK}) {
    for (int protocol : {PTHREAD_PRIO_INHERIT}) {
      PIMutexWakeupHelper helper(type, protocol);
      helper.test();
    }
  }
}

TEST(pthread, pthread_mutex_owner_tid_limit) {
#if defined(__BIONIC__) && !defined(__LP64__)
  FILE* fp = fopen("/proc/sys/kernel/pid_max", "r");
  ASSERT_TRUE(fp != nullptr);
  long pid_max;
  ASSERT_EQ(1, fscanf(fp, "%ld", &pid_max));
  fclose(fp);
  // Bionic's pthread_mutex implementation on 32-bit devices uses 16 bits to represent owner tid.
  ASSERT_LE(pid_max, 65536);
#else
  GTEST_SKIP() << "pthread_mutex supports 32-bit tid";
#endif
}

static void pthread_mutex_timedlock_helper(clockid_t clock,
                                           int (*lock_function)(pthread_mutex_t* __mutex,
                                                                const timespec* __timeout)) {
  pthread_mutex_t m;
  ASSERT_EQ(0, pthread_mutex_init(&m, nullptr));

  // If the mutex is already locked, pthread_mutex_timedlock should time out.
  ASSERT_EQ(0, pthread_mutex_lock(&m));

  timespec ts;
  ASSERT_EQ(0, clock_gettime(clock, &ts));
  ASSERT_EQ(ETIMEDOUT, lock_function(&m, &ts));
  ts.tv_nsec = -1;
  ASSERT_EQ(EINVAL, lock_function(&m, &ts));
  ts.tv_nsec = NS_PER_S;
  ASSERT_EQ(EINVAL, lock_function(&m, &ts));
  ts.tv_nsec = NS_PER_S - 1;
  ts.tv_sec = -1;
  ASSERT_EQ(ETIMEDOUT, lock_function(&m, &ts));

  // Check we wait long enough for the lock before timing out...

  // What time is it before we start?
  ASSERT_EQ(0, clock_gettime(clock, &ts));
  const int64_t start_ns = to_ns(ts);
  // Add a second to get the deadline, and wait until we time out.
  ts.tv_sec += 1;
  ASSERT_EQ(ETIMEDOUT, lock_function(&m, &ts));

  // What time is it now that we've timed out?
  timespec ts2;
  clock_gettime(clock, &ts2);
  const int64_t end_ns = to_ns(ts2);

  // The timedlock must have waited at least 1 second before returning.
  ASSERT_GE(end_ns - start_ns, NS_PER_S);

  // If the mutex is unlocked, pthread_mutex_timedlock should succeed.
  ASSERT_EQ(0, pthread_mutex_unlock(&m));
  ASSERT_EQ(0, clock_gettime(clock, &ts));
  ts.tv_sec += 1;
  ASSERT_EQ(0, lock_function(&m, &ts));

  ASSERT_EQ(0, pthread_mutex_unlock(&m));
  ASSERT_EQ(0, pthread_mutex_destroy(&m));
}

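// For reference: the timeout taken by these functions is an absolute
// deadline on the given clock, not a relative interval. A minimal caller
// sketch (illustrative; error handling omitted):
//   timespec deadline;
//   clock_gettime(CLOCK_REALTIME, &deadline);
//   deadline.tv_sec += 1;  // One second from now.
//   int rc = pthread_mutex_timedlock(&m, &deadline);  // 0, or ETIMEDOUT.
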
TEST(pthread, pthread_mutex_timedlock) {
  pthread_mutex_timedlock_helper(CLOCK_REALTIME, pthread_mutex_timedlock);
}

TEST(pthread, pthread_mutex_timedlock_monotonic_np) {
#if defined(__BIONIC__)
  pthread_mutex_timedlock_helper(CLOCK_MONOTONIC, pthread_mutex_timedlock_monotonic_np);
#else // __BIONIC__
  GTEST_SKIP() << "pthread_mutex_timedlock_monotonic_np not available";
#endif // __BIONIC__
}

TEST(pthread, pthread_mutex_clocklock_MONOTONIC) {
#if defined(__BIONIC__)
  pthread_mutex_timedlock_helper(
      CLOCK_MONOTONIC, [](pthread_mutex_t* __mutex, const timespec* __timeout) {
        return pthread_mutex_clocklock(__mutex, CLOCK_MONOTONIC, __timeout);
      });
#else // __BIONIC__
  GTEST_SKIP() << "pthread_mutex_clocklock not available";
#endif // __BIONIC__
}

TEST(pthread, pthread_mutex_clocklock_REALTIME) {
#if defined(__BIONIC__)
  pthread_mutex_timedlock_helper(
      CLOCK_REALTIME, [](pthread_mutex_t* __mutex, const timespec* __timeout) {
        return pthread_mutex_clocklock(__mutex, CLOCK_REALTIME, __timeout);
      });
#else // __BIONIC__
  GTEST_SKIP() << "pthread_mutex_clocklock not available";
#endif // __BIONIC__
}

static void pthread_mutex_timedlock_pi_helper(clockid_t clock,
                                              int (*lock_function)(pthread_mutex_t* __mutex,
                                                                   const timespec* __timeout)) {
  PthreadMutex m(PTHREAD_MUTEX_NORMAL, PTHREAD_PRIO_INHERIT);

  timespec ts;
  clock_gettime(clock, &ts);
  const int64_t start_ns = ts.tv_sec * NS_PER_S + ts.tv_nsec;

  // Add a second to get the deadline.
  ts.tv_sec += 1;

  ASSERT_EQ(0, lock_function(&m.lock, &ts));

  struct ThreadArgs {
    clockid_t clock;
    int (*lock_function)(pthread_mutex_t* __mutex, const timespec* __timeout);
    PthreadMutex& m;
  };

  ThreadArgs thread_args = {
    .clock = clock,
    .lock_function = lock_function,
    .m = m,
  };

  auto ThreadFn = [](void* arg) -> void* {
    auto args = static_cast<ThreadArgs*>(arg);
    timespec ts;
    clock_gettime(args->clock, &ts);
    ts.tv_sec += 1;
    intptr_t result = args->lock_function(&args->m.lock, &ts);
    return reinterpret_cast<void*>(result);
  };

  pthread_t thread;
  ASSERT_EQ(0, pthread_create(&thread, nullptr, ThreadFn, &thread_args));
  void* result;
  ASSERT_EQ(0, pthread_join(thread, &result));
  ASSERT_EQ(ETIMEDOUT, reinterpret_cast<intptr_t>(result));

  // The timedlock must have waited at least 1 second before returning.
  clock_gettime(clock, &ts);
  const int64_t end_ns = ts.tv_sec * NS_PER_S + ts.tv_nsec;
  ASSERT_GT(end_ns - start_ns, NS_PER_S);

  ASSERT_EQ(0, pthread_mutex_unlock(&m.lock));
}

TEST(pthread, pthread_mutex_timedlock_pi) {
  pthread_mutex_timedlock_pi_helper(CLOCK_REALTIME, pthread_mutex_timedlock);
}

TEST(pthread, pthread_mutex_timedlock_monotonic_np_pi) {
#if defined(__BIONIC__)
  pthread_mutex_timedlock_pi_helper(CLOCK_MONOTONIC, pthread_mutex_timedlock_monotonic_np);
#else // __BIONIC__
  GTEST_SKIP() << "pthread_mutex_timedlock_monotonic_np not available";
#endif // __BIONIC__
}

TEST(pthread, pthread_mutex_clocklock_pi) {
#if defined(__BIONIC__)
  pthread_mutex_timedlock_pi_helper(
      CLOCK_MONOTONIC, [](pthread_mutex_t* __mutex, const timespec* __timeout) {
        return pthread_mutex_clocklock(__mutex, CLOCK_MONOTONIC, __timeout);
      });
  pthread_mutex_timedlock_pi_helper(
      CLOCK_REALTIME, [](pthread_mutex_t* __mutex, const timespec* __timeout) {
        return pthread_mutex_clocklock(__mutex, CLOCK_REALTIME, __timeout);
      });
#else // __BIONIC__
  GTEST_SKIP() << "pthread_mutex_clocklock not available";
#endif // __BIONIC__
}

TEST(pthread, pthread_mutex_clocklock_invalid) {
#if defined(__BIONIC__)
  pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER;
  timespec ts;
  EXPECT_EQ(EINVAL, pthread_mutex_clocklock(&mutex, CLOCK_PROCESS_CPUTIME_ID, &ts));
#else // __BIONIC__
  GTEST_SKIP() << "pthread_mutex_clocklock not available";
#endif // __BIONIC__
}

TEST_F(pthread_DeathTest, pthread_mutex_using_destroyed_mutex) {
#if defined(__BIONIC__)
  pthread_mutex_t m;
  ASSERT_EQ(0, pthread_mutex_init(&m, nullptr));
  ASSERT_EQ(0, pthread_mutex_destroy(&m));
  ASSERT_EXIT(pthread_mutex_lock(&m), ::testing::KilledBySignal(SIGABRT),
              "pthread_mutex_lock called on a destroyed mutex");
  ASSERT_EXIT(pthread_mutex_unlock(&m), ::testing::KilledBySignal(SIGABRT),
              "pthread_mutex_unlock called on a destroyed mutex");
  ASSERT_EXIT(pthread_mutex_trylock(&m), ::testing::KilledBySignal(SIGABRT),
              "pthread_mutex_trylock called on a destroyed mutex");
  timespec ts;
  ASSERT_EXIT(pthread_mutex_timedlock(&m, &ts), ::testing::KilledBySignal(SIGABRT),
              "pthread_mutex_timedlock called on a destroyed mutex");
  ASSERT_EXIT(pthread_mutex_timedlock_monotonic_np(&m, &ts), ::testing::KilledBySignal(SIGABRT),
              "pthread_mutex_timedlock_monotonic_np called on a destroyed mutex");
  ASSERT_EXIT(pthread_mutex_clocklock(&m, CLOCK_MONOTONIC, &ts), ::testing::KilledBySignal(SIGABRT),
              "pthread_mutex_clocklock called on a destroyed mutex");
  ASSERT_EXIT(pthread_mutex_clocklock(&m, CLOCK_REALTIME, &ts), ::testing::KilledBySignal(SIGABRT),
              "pthread_mutex_clocklock called on a destroyed mutex");
  ASSERT_EXIT(pthread_mutex_clocklock(&m, CLOCK_PROCESS_CPUTIME_ID, &ts),
              ::testing::KilledBySignal(SIGABRT),
              "pthread_mutex_clocklock called on a destroyed mutex");
  ASSERT_EXIT(pthread_mutex_destroy(&m), ::testing::KilledBySignal(SIGABRT),
              "pthread_mutex_destroy called on a destroyed mutex");
#else
  GTEST_SKIP() << "bionic-only test";
#endif
}

class StrictAlignmentAllocator {
 public:
  void* allocate(size_t size, size_t alignment) {
    char* p = new char[size + alignment * 2];
    allocated_array.push_back(p);
    while (!is_strict_aligned(p, alignment)) {
      ++p;
    }
    return p;
  }

  ~StrictAlignmentAllocator() {
    for (const auto& p : allocated_array) {
      delete[] p;
    }
  }

 private:
  // Returns true if p is aligned to `alignment` but deliberately misaligned
  // at `2 * alignment`, so callers get exactly the alignment they asked for
  // and no more.
  bool is_strict_aligned(char* p, size_t alignment) {
    return (reinterpret_cast<uintptr_t>(p) % (alignment * 2)) == alignment;
  }

  std::vector<char*> allocated_array;
};

TEST(pthread, pthread_types_allow_four_bytes_alignment) {
#if defined(__BIONIC__)
  // For binary compatibility with old versions, we need to allow 4-byte alignment for pthread types.
  StrictAlignmentAllocator allocator;
  pthread_mutex_t* mutex = reinterpret_cast<pthread_mutex_t*>(
      allocator.allocate(sizeof(pthread_mutex_t), 4));
  ASSERT_EQ(0, pthread_mutex_init(mutex, nullptr));
  ASSERT_EQ(0, pthread_mutex_lock(mutex));
  ASSERT_EQ(0, pthread_mutex_unlock(mutex));
  ASSERT_EQ(0, pthread_mutex_destroy(mutex));

  pthread_cond_t* cond = reinterpret_cast<pthread_cond_t*>(
      allocator.allocate(sizeof(pthread_cond_t), 4));
  ASSERT_EQ(0, pthread_cond_init(cond, nullptr));
  ASSERT_EQ(0, pthread_cond_signal(cond));
  ASSERT_EQ(0, pthread_cond_broadcast(cond));
  ASSERT_EQ(0, pthread_cond_destroy(cond));

  pthread_rwlock_t* rwlock = reinterpret_cast<pthread_rwlock_t*>(
      allocator.allocate(sizeof(pthread_rwlock_t), 4));
  ASSERT_EQ(0, pthread_rwlock_init(rwlock, nullptr));
  ASSERT_EQ(0, pthread_rwlock_rdlock(rwlock));
  ASSERT_EQ(0, pthread_rwlock_unlock(rwlock));
  ASSERT_EQ(0, pthread_rwlock_wrlock(rwlock));
  ASSERT_EQ(0, pthread_rwlock_unlock(rwlock));
  ASSERT_EQ(0, pthread_rwlock_destroy(rwlock));

#else
  GTEST_SKIP() << "bionic-only test";
#endif
}

TEST(pthread, pthread_mutex_lock_null_32) {
#if defined(__BIONIC__) && !defined(__LP64__)
  // For LP32, the pthread lock/unlock functions allow a NULL mutex and return
  // EINVAL in that case: http://b/19995172.
  //
  // We decorate the public definition with _Nonnull so that people recompiling
  // their code will get a warning and might fix their bug, but we need to pass
  // NULL here to test that we remain compatible.
  pthread_mutex_t* null_value = nullptr;
  ASSERT_EQ(EINVAL, pthread_mutex_lock(null_value));
#else
  GTEST_SKIP() << "32-bit bionic-only test";
#endif
}

TEST(pthread, pthread_mutex_unlock_null_32) {
#if defined(__BIONIC__) && !defined(__LP64__)
  // For LP32, the pthread lock/unlock functions allow a NULL mutex and return
  // EINVAL in that case: http://b/19995172.
  //
  // We decorate the public definition with _Nonnull so that people recompiling
  // their code will get a warning and might fix their bug, but we need to pass
  // NULL here to test that we remain compatible.
  pthread_mutex_t* null_value = nullptr;
  ASSERT_EQ(EINVAL, pthread_mutex_unlock(null_value));
#else
  GTEST_SKIP() << "32-bit bionic-only test";
#endif
}

TEST_F(pthread_DeathTest, pthread_mutex_lock_null_64) {
#if defined(__BIONIC__) && defined(__LP64__)
  pthread_mutex_t* null_value = nullptr;
  ASSERT_EXIT(pthread_mutex_lock(null_value), testing::KilledBySignal(SIGSEGV), "");
#else
  GTEST_SKIP() << "64-bit bionic-only test";
#endif
}

TEST_F(pthread_DeathTest, pthread_mutex_unlock_null_64) {
#if defined(__BIONIC__) && defined(__LP64__)
  pthread_mutex_t* null_value = nullptr;
  ASSERT_EXIT(pthread_mutex_unlock(null_value), testing::KilledBySignal(SIGSEGV), "");
#else
  GTEST_SKIP() << "64-bit bionic-only test";
#endif
}

extern _Unwind_Reason_Code FrameCounter(_Unwind_Context* ctx, void* arg);

static volatile bool signal_handler_on_altstack_done;

__attribute__((__noinline__))
static void signal_handler_backtrace() {
  // Check that we have enough stack space for unwinding.
  int count = 0;
  _Unwind_Backtrace(FrameCounter, &count);
  ASSERT_GT(count, 0);
}

__attribute__((__noinline__))
static void signal_handler_logging() {
  // Check that we have enough stack space for logging.
  std::string s(2048, '*');
  GTEST_LOG_(INFO) << s;
  signal_handler_on_altstack_done = true;
}

__attribute__((__noinline__))
static void signal_handler_snprintf() {
  // Check that we have enough stack space for snprintf to a PATH_MAX buffer, plus some extra.
  char buf[PATH_MAX + 2048];
  ASSERT_GT(snprintf(buf, sizeof(buf), "/proc/%d/status", getpid()), 0);
}

static void SignalHandlerOnAltStack(int signo, siginfo_t*, void*) {
  ASSERT_EQ(SIGUSR1, signo);
  signal_handler_backtrace();
  signal_handler_logging();
  signal_handler_snprintf();
}

TEST(pthread, big_enough_signal_stack) {
  signal_handler_on_altstack_done = false;
  ScopedSignalHandler handler(SIGUSR1, SignalHandlerOnAltStack, SA_SIGINFO | SA_ONSTACK);
  kill(getpid(), SIGUSR1);
  ASSERT_TRUE(signal_handler_on_altstack_done);
}

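// For reference: the SA_ONSTACK handlers above run on the alternate signal
// stack that bionic sets up for each thread. A thread managing its own
// would use sigaltstack(); a minimal sketch (illustrative; error handling
// omitted):
//   stack_t ss = {};
//   ss.ss_sp = malloc(SIGSTKSZ);
//   ss.ss_size = SIGSTKSZ;
//   sigaltstack(&ss, nullptr);
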
TEST(pthread, pthread_barrierattr_smoke) {
  pthread_barrierattr_t attr;
  ASSERT_EQ(0, pthread_barrierattr_init(&attr));
  int pshared;
  ASSERT_EQ(0, pthread_barrierattr_getpshared(&attr, &pshared));
  ASSERT_EQ(PTHREAD_PROCESS_PRIVATE, pshared);
  ASSERT_EQ(0, pthread_barrierattr_setpshared(&attr, PTHREAD_PROCESS_SHARED));
  ASSERT_EQ(0, pthread_barrierattr_getpshared(&attr, &pshared));
  ASSERT_EQ(PTHREAD_PROCESS_SHARED, pshared);
  ASSERT_EQ(0, pthread_barrierattr_destroy(&attr));
}

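// For reference: a PTHREAD_PROCESS_SHARED barrier is only useful in memory
// visible to every participating process. A minimal sketch (illustrative,
// assuming the mapping is shared with a child created by fork(); error
// handling omitted):
//   pthread_barrier_t* b = static_cast<pthread_barrier_t*>(
//       mmap(nullptr, sizeof(*b), PROT_READ | PROT_WRITE,
//            MAP_SHARED | MAP_ANONYMOUS, -1, 0));
//   pthread_barrierattr_t attr;
//   pthread_barrierattr_init(&attr);
//   pthread_barrierattr_setpshared(&attr, PTHREAD_PROCESS_SHARED);
//   pthread_barrier_init(b, &attr, 2);  // Two waiters: parent and child.
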
struct BarrierTestHelperData {
  size_t thread_count;
  pthread_barrier_t barrier;
  std::atomic<int> finished_mask;
  std::atomic<int> serial_thread_count;
  size_t iteration_count;
  std::atomic<size_t> finished_iteration_count;

  BarrierTestHelperData(size_t thread_count, size_t iteration_count)
      : thread_count(thread_count), finished_mask(0), serial_thread_count(0),
        iteration_count(iteration_count), finished_iteration_count(0) {
  }
};

struct BarrierTestHelperArg {
  int id;
  BarrierTestHelperData* data;
};

static void BarrierTestHelper(BarrierTestHelperArg* arg) {
  for (size_t i = 0; i < arg->data->iteration_count; ++i) {
    int result = pthread_barrier_wait(&arg->data->barrier);
    if (result == PTHREAD_BARRIER_SERIAL_THREAD) {
      arg->data->serial_thread_count++;
    } else {
      ASSERT_EQ(0, result);
    }
    // fetch_or() returns the value before the or, so add our own bit back in.
    int mask = arg->data->finished_mask.fetch_or(1 << arg->id);
    mask |= 1 << arg->id;
    if (mask == ((1 << arg->data->thread_count) - 1)) {
      ASSERT_EQ(1, arg->data->serial_thread_count);
      arg->data->finished_iteration_count++;
      arg->data->finished_mask = 0;
      arg->data->serial_thread_count = 0;
    }
  }
}

TEST(pthread, pthread_barrier_smoke) {
  const size_t BARRIER_ITERATION_COUNT = 10;
  const size_t BARRIER_THREAD_COUNT = 10;
  BarrierTestHelperData data(BARRIER_THREAD_COUNT, BARRIER_ITERATION_COUNT);
  ASSERT_EQ(0, pthread_barrier_init(&data.barrier, nullptr, data.thread_count));
  std::vector<pthread_t> threads(data.thread_count);
  std::vector<BarrierTestHelperArg> args(threads.size());
  for (size_t i = 0; i < threads.size(); ++i) {
    args[i].id = i;
    args[i].data = &data;
    ASSERT_EQ(0, pthread_create(&threads[i], nullptr,
                                reinterpret_cast<void* (*)(void*)>(BarrierTestHelper), &args[i]));
  }
  for (size_t i = 0; i < threads.size(); ++i) {
    ASSERT_EQ(0, pthread_join(threads[i], nullptr));
  }
  ASSERT_EQ(data.iteration_count, data.finished_iteration_count);
  ASSERT_EQ(0, pthread_barrier_destroy(&data.barrier));
}

struct BarrierDestroyTestArg {
  std::atomic<int> tid;
  pthread_barrier_t* barrier;
};

static void BarrierDestroyTestHelper(BarrierDestroyTestArg* arg) {
  arg->tid = gettid();
  ASSERT_EQ(0, pthread_barrier_wait(arg->barrier));
}

TEST(pthread, pthread_barrier_destroy) {
  pthread_barrier_t barrier;
  ASSERT_EQ(0, pthread_barrier_init(&barrier, nullptr, 2));
  pthread_t thread;
  BarrierDestroyTestArg arg;
  arg.tid = 0;
  arg.barrier = &barrier;
  ASSERT_EQ(0, pthread_create(&thread, nullptr,
                              reinterpret_cast<void* (*)(void*)>(BarrierDestroyTestHelper), &arg));
  WaitUntilThreadSleep(arg.tid);
  ASSERT_EQ(EBUSY, pthread_barrier_destroy(&barrier));
  ASSERT_EQ(PTHREAD_BARRIER_SERIAL_THREAD, pthread_barrier_wait(&barrier));
  // Verify that the barrier can be destroyed directly after pthread_barrier_wait().
  ASSERT_EQ(0, pthread_barrier_destroy(&barrier));
  ASSERT_EQ(0, pthread_join(thread, nullptr));
#if defined(__BIONIC__)
  ASSERT_EQ(EINVAL, pthread_barrier_destroy(&barrier));
#endif
}

struct BarrierOrderingTestHelperArg {
  pthread_barrier_t* barrier;
  size_t* array;
  size_t array_length;
  size_t id;
};

void BarrierOrderingTestHelper(BarrierOrderingTestHelperArg* arg) {
  const size_t ITERATION_COUNT = 10000;
  for (size_t i = 1; i <= ITERATION_COUNT; ++i) {
    arg->array[arg->id] = i;
    int result = pthread_barrier_wait(arg->barrier);
    ASSERT_TRUE(result == 0 || result == PTHREAD_BARRIER_SERIAL_THREAD);
    for (size_t j = 0; j < arg->array_length; ++j) {
      ASSERT_EQ(i, arg->array[j]);
    }
    result = pthread_barrier_wait(arg->barrier);
    ASSERT_TRUE(result == 0 || result == PTHREAD_BARRIER_SERIAL_THREAD);
  }
}

TEST(pthread, pthread_barrier_check_ordering) {
  const size_t THREAD_COUNT = 4;
  pthread_barrier_t barrier;
  ASSERT_EQ(0, pthread_barrier_init(&barrier, nullptr, THREAD_COUNT));
  size_t array[THREAD_COUNT];
  std::vector<pthread_t> threads(THREAD_COUNT);
  std::vector<BarrierOrderingTestHelperArg> args(THREAD_COUNT);
  for (size_t i = 0; i < THREAD_COUNT; ++i) {
    args[i].barrier = &barrier;
    args[i].array = array;
    args[i].array_length = THREAD_COUNT;
    args[i].id = i;
    ASSERT_EQ(0, pthread_create(&threads[i], nullptr,
                                reinterpret_cast<void* (*)(void*)>(BarrierOrderingTestHelper),
                                &args[i]));
  }
  for (size_t i = 0; i < THREAD_COUNT; ++i) {
    ASSERT_EQ(0, pthread_join(threads[i], nullptr));
  }
}

TEST(pthread, pthread_barrier_init_zero_count) {
  pthread_barrier_t barrier;
  ASSERT_EQ(EINVAL, pthread_barrier_init(&barrier, nullptr, 0));
}

TEST(pthread, pthread_spinlock_smoke) {
  pthread_spinlock_t lock;
  ASSERT_EQ(0, pthread_spin_init(&lock, 0));
  ASSERT_EQ(0, pthread_spin_trylock(&lock));
  ASSERT_EQ(0, pthread_spin_unlock(&lock));
  ASSERT_EQ(0, pthread_spin_lock(&lock));
  ASSERT_EQ(EBUSY, pthread_spin_trylock(&lock));
  ASSERT_EQ(0, pthread_spin_unlock(&lock));
  ASSERT_EQ(0, pthread_spin_destroy(&lock));
}

TEST(pthread, pthread_attr_getdetachstate__pthread_attr_setdetachstate) {
  pthread_attr_t attr;
  ASSERT_EQ(0, pthread_attr_init(&attr));

  int state;
  ASSERT_EQ(0, pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED));
  ASSERT_EQ(0, pthread_attr_getdetachstate(&attr, &state));
  ASSERT_EQ(PTHREAD_CREATE_DETACHED, state);

  ASSERT_EQ(0, pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_JOINABLE));
  ASSERT_EQ(0, pthread_attr_getdetachstate(&attr, &state));
  ASSERT_EQ(PTHREAD_CREATE_JOINABLE, state);

  ASSERT_EQ(EINVAL, pthread_attr_setdetachstate(&attr, 123));
  ASSERT_EQ(0, pthread_attr_getdetachstate(&attr, &state));
  ASSERT_EQ(PTHREAD_CREATE_JOINABLE, state);
}

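// For reference: the usual way to start a fire-and-forget thread with the
// attribute tested above; a minimal sketch (illustrative; `fn` and `arg`
// are hypothetical, and error handling is omitted):
//   pthread_attr_t attr;
//   pthread_attr_init(&attr);
//   pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
//   pthread_t t;
//   pthread_create(&t, &attr, fn, arg);  // No pthread_join() later.
//   pthread_attr_destroy(&attr);
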
TEST(pthread, pthread_create__mmap_failures) {
  // After a thread is successfully created, native_bridge might need more memory to run it.
  SKIP_WITH_NATIVE_BRIDGE;

  pthread_attr_t attr;
  ASSERT_EQ(0, pthread_attr_init(&attr));
  ASSERT_EQ(0, pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED));

  const auto kPageSize = sysconf(_SC_PAGE_SIZE);

  // Use up all the VMAs. By default this is 64Ki (though some will already be in use).
  std::vector<void*> pages;
  pages.reserve(64 * 1024);
  int prot = PROT_NONE;
  while (true) {
    void* page = mmap(nullptr, kPageSize, prot, MAP_ANON|MAP_PRIVATE, -1, 0);
    if (page == MAP_FAILED) break;
    pages.push_back(page);
    prot = (prot == PROT_NONE) ? PROT_READ : PROT_NONE;
  }

  // Try creating threads, freeing up a page each time we fail.
  size_t EAGAIN_count = 0;
  size_t i = 0;
  for (; i < pages.size(); ++i) {
    pthread_t t;
    int status = pthread_create(&t, &attr, IdFn, nullptr);
    if (status != EAGAIN) break;
    ++EAGAIN_count;
    ASSERT_EQ(0, munmap(pages[i], kPageSize));
  }

  // Creating a thread uses at least three VMAs: the combined stack and TLS, and a guard on each
  // side. So we should have seen at least three failures.
  ASSERT_GE(EAGAIN_count, 3U);

  for (; i < pages.size(); ++i) {
    ASSERT_EQ(0, munmap(pages[i], kPageSize));
  }
}

TEST(pthread, pthread_setschedparam) {
  sched_param p = { .sched_priority = INT_MIN };
  ASSERT_EQ(EINVAL, pthread_setschedparam(pthread_self(), INT_MIN, &p));
}

TEST(pthread, pthread_setschedprio) {
  ASSERT_EQ(EINVAL, pthread_setschedprio(pthread_self(), INT_MIN));
}

TEST(pthread, pthread_attr_getinheritsched__pthread_attr_setinheritsched) {
  pthread_attr_t attr;
  ASSERT_EQ(0, pthread_attr_init(&attr));

  int state;
  ASSERT_EQ(0, pthread_attr_setinheritsched(&attr, PTHREAD_INHERIT_SCHED));
  ASSERT_EQ(0, pthread_attr_getinheritsched(&attr, &state));
  ASSERT_EQ(PTHREAD_INHERIT_SCHED, state);

  ASSERT_EQ(0, pthread_attr_setinheritsched(&attr, PTHREAD_EXPLICIT_SCHED));
  ASSERT_EQ(0, pthread_attr_getinheritsched(&attr, &state));
  ASSERT_EQ(PTHREAD_EXPLICIT_SCHED, state);

  ASSERT_EQ(EINVAL, pthread_attr_setinheritsched(&attr, 123));
  ASSERT_EQ(0, pthread_attr_getinheritsched(&attr, &state));
  ASSERT_EQ(PTHREAD_EXPLICIT_SCHED, state);
}

TEST(pthread, pthread_attr_setinheritsched__PTHREAD_INHERIT_SCHED__PTHREAD_EXPLICIT_SCHED) {
  pthread_attr_t attr;
  ASSERT_EQ(0, pthread_attr_init(&attr));

  // If we set invalid scheduling attributes but choose to inherit, everything's fine...
  sched_param param = { .sched_priority = sched_get_priority_max(SCHED_FIFO) + 1 };
  ASSERT_EQ(0, pthread_attr_setschedparam(&attr, &param));
  ASSERT_EQ(0, pthread_attr_setschedpolicy(&attr, SCHED_FIFO));
  ASSERT_EQ(0, pthread_attr_setinheritsched(&attr, PTHREAD_INHERIT_SCHED));

  pthread_t t;
  ASSERT_EQ(0, pthread_create(&t, &attr, IdFn, nullptr));
  ASSERT_EQ(0, pthread_join(t, nullptr));

#if defined(__LP64__)
  // If we ask to use them, though, we'll see a failure...
  ASSERT_EQ(0, pthread_attr_setinheritsched(&attr, PTHREAD_EXPLICIT_SCHED));
  ASSERT_EQ(EINVAL, pthread_create(&t, &attr, IdFn, nullptr));
#else
  // For backwards compatibility with broken apps, we just ignore failures
  // to set scheduler attributes on LP32.
#endif
}

TEST(pthread, pthread_attr_setinheritsched_PTHREAD_INHERIT_SCHED_takes_effect) {
  sched_param param = { .sched_priority = sched_get_priority_min(SCHED_FIFO) };
  int rc = pthread_setschedparam(pthread_self(), SCHED_FIFO, &param);
  if (rc == EPERM) GTEST_SKIP() << "pthread_setschedparam failed with EPERM";
  ASSERT_EQ(0, rc);

  pthread_attr_t attr;
  ASSERT_EQ(0, pthread_attr_init(&attr));
  ASSERT_EQ(0, pthread_attr_setinheritsched(&attr, PTHREAD_INHERIT_SCHED));

  SpinFunctionHelper spin_helper;
  pthread_t t;
  ASSERT_EQ(0, pthread_create(&t, &attr, spin_helper.GetFunction(), nullptr));
  int actual_policy;
  sched_param actual_param;
  ASSERT_EQ(0, pthread_getschedparam(t, &actual_policy, &actual_param));
  ASSERT_EQ(SCHED_FIFO, actual_policy);
  spin_helper.UnSpin();
  ASSERT_EQ(0, pthread_join(t, nullptr));
}

TEST(pthread, pthread_attr_setinheritsched_PTHREAD_EXPLICIT_SCHED_takes_effect) {
  sched_param param = { .sched_priority = sched_get_priority_min(SCHED_FIFO) };
  int rc = pthread_setschedparam(pthread_self(), SCHED_FIFO, &param);
  if (rc == EPERM) GTEST_SKIP() << "pthread_setschedparam failed with EPERM";
  ASSERT_EQ(0, rc);

  pthread_attr_t attr;
  ASSERT_EQ(0, pthread_attr_init(&attr));
  ASSERT_EQ(0, pthread_attr_setinheritsched(&attr, PTHREAD_EXPLICIT_SCHED));
  ASSERT_EQ(0, pthread_attr_setschedpolicy(&attr, SCHED_OTHER));

  SpinFunctionHelper spin_helper;
  pthread_t t;
  ASSERT_EQ(0, pthread_create(&t, &attr, spin_helper.GetFunction(), nullptr));
  int actual_policy;
  sched_param actual_param;
  ASSERT_EQ(0, pthread_getschedparam(t, &actual_policy, &actual_param));
  ASSERT_EQ(SCHED_OTHER, actual_policy);
  spin_helper.UnSpin();
  ASSERT_EQ(0, pthread_join(t, nullptr));
}

TEST(pthread, pthread_attr_setinheritsched__takes_effect_despite_SCHED_RESET_ON_FORK) {
  sched_param param = { .sched_priority = sched_get_priority_min(SCHED_FIFO) };
  int rc = pthread_setschedparam(pthread_self(), SCHED_FIFO | SCHED_RESET_ON_FORK, &param);
  if (rc == EPERM) GTEST_SKIP() << "pthread_setschedparam failed with EPERM";
  ASSERT_EQ(0, rc);

  pthread_attr_t attr;
  ASSERT_EQ(0, pthread_attr_init(&attr));
  ASSERT_EQ(0, pthread_attr_setinheritsched(&attr, PTHREAD_INHERIT_SCHED));

  SpinFunctionHelper spin_helper;
  pthread_t t;
  ASSERT_EQ(0, pthread_create(&t, &attr, spin_helper.GetFunction(), nullptr));
  int actual_policy;
  sched_param actual_param;
  ASSERT_EQ(0, pthread_getschedparam(t, &actual_policy, &actual_param));
  ASSERT_EQ(SCHED_FIFO | SCHED_RESET_ON_FORK, actual_policy);
  spin_helper.UnSpin();
  ASSERT_EQ(0, pthread_join(t, nullptr));
}

extern "C" bool android_run_on_all_threads(bool (*func)(void*), void* arg);

TEST(pthread, run_on_all_threads) {
#if defined(__BIONIC__)
  pthread_t t;
  ASSERT_EQ(
      0, pthread_create(
             &t, nullptr,
             [](void*) -> void* {
               pthread_attr_t detached;
               if (pthread_attr_init(&detached) != 0 ||
                   pthread_attr_setdetachstate(&detached, PTHREAD_CREATE_DETACHED) != 0) {
                 return reinterpret_cast<void*>(errno);
               }

               for (int i = 0; i != 1000; ++i) {
                 pthread_t t1, t2;
                 if (pthread_create(
                         &t1, &detached, [](void*) -> void* { return nullptr; }, nullptr) != 0 ||
                     pthread_create(
                         &t2, nullptr, [](void*) -> void* { return nullptr; }, nullptr) != 0 ||
                     pthread_join(t2, nullptr) != 0) {
                   return reinterpret_cast<void*>(errno);
                 }
               }

               if (pthread_attr_destroy(&detached) != 0) {
                 return reinterpret_cast<void*>(errno);
               }
               return nullptr;
             },
             nullptr));

  for (int i = 0; i != 1000; ++i) {
    ASSERT_TRUE(android_run_on_all_threads([](void* arg) { return arg == nullptr; }, nullptr));
  }

  void* retval;
  ASSERT_EQ(0, pthread_join(t, &retval));
  ASSERT_EQ(nullptr, retval);
#else
  GTEST_SKIP() << "bionic-only test";
#endif
}

TEST(pthread, pthread_getaffinity_np_failure) {
  // Trivial test of the errno-preserving/returning behavior.
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wnonnull"
  errno = 0;
  ASSERT_EQ(EINVAL, pthread_getaffinity_np(pthread_self(), 0, nullptr));
  ASSERT_ERRNO(0);
#pragma clang diagnostic pop
}

TEST(pthread, pthread_getaffinity) {
  cpu_set_t set;
  CPU_ZERO(&set);
  ASSERT_EQ(0, pthread_getaffinity_np(pthread_self(), sizeof(set), &set));
  ASSERT_GT(CPU_COUNT(&set), 0);
}

TEST(pthread, pthread_setaffinity_np_failure) {
  // Trivial test of the errno-preserving/returning behavior.
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wnonnull"
  errno = 0;
  ASSERT_EQ(EINVAL, pthread_setaffinity_np(pthread_self(), 0, nullptr));
  ASSERT_ERRNO(0);
#pragma clang diagnostic pop
}

TEST(pthread, pthread_setaffinity) {
  cpu_set_t set;
  CPU_ZERO(&set);
  ASSERT_EQ(0, pthread_getaffinity_np(pthread_self(), sizeof(set), &set));
  // It's hard to make any more general claim than this,
  // but it ought to be safe to ask for the same affinity you already have.
  ASSERT_EQ(0, pthread_setaffinity_np(pthread_self(), sizeof(set), &set));
}

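// For reference: pinning the calling thread to a single CPU looks like this;
// a minimal sketch (illustrative, assuming CPU 0 exists and is allowed by
// the thread's cpuset):
//   cpu_set_t one_cpu;
//   CPU_ZERO(&one_cpu);
//   CPU_SET(0, &one_cpu);
//   pthread_setaffinity_np(pthread_self(), sizeof(one_cpu), &one_cpu);
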
#if defined(__aarch64__)

static void* sme_state_checking_thread(void*) {
  // Expected state in the child thread:
  // - PSTATE.SM is 0
  // - PSTATE.ZA is 0
  // - TPIDR2_EL0 is 0
  EXPECT_FALSE(sme_is_sm_on());
  EXPECT_FALSE(sme_is_za_on());
  EXPECT_EQ(0UL, sme_tpidr2_el0());

  return nullptr;
}

static void create_thread() {
  pthread_t thread;
  // Even if these asserts fail, sme_state_cleanup() will still be run.
  ASSERT_EQ(0, pthread_create(&thread, nullptr, &sme_state_checking_thread, nullptr));
  ASSERT_EQ(0, pthread_join(thread, nullptr));
}

// It is expected that the new thread is started with SME off.
TEST(pthread, pthread_create_with_sme_off) {
  if (!sme_is_enabled()) {
    GTEST_SKIP() << "FEAT_SME is not enabled on the device.";
  }

  // It is safe to call __arm_za_disable(). This is required to avoid inter-test dependencies.
  __arm_za_disable();
  create_thread();
  sme_state_cleanup();
}

// It is expected that the new thread is started with SME off.
TEST(pthread, pthread_create_with_sme_dormant_state) {
  if (!sme_is_enabled()) {
    GTEST_SKIP() << "FEAT_SME is not enabled on the device.";
  }

  __arm_za_disable();
  sme_dormant_caller(&create_thread);
  sme_state_cleanup();
}

#endif  // defined(__aarch64__)