[libcxx testing] Make three locking tests more reliable

The challenge with measuring time in tests is that slow and/or busy
machines can cause tests to fail in unexpected ways. After this change,
three tests should be much more robust. The only remaining race — a tiny
one — that I can think of is preemption just after `--countDown`: a
thread can be preempted between decrementing the counter and actually
blocking on the lock, so the main thread may record the start time
before every worker is truly waiting. That said, this race isn't fixable
because the standard library doesn't provide a way to count threads that
are waiting to acquire a lock.

Reviewers: ldionne, EricWF, howard.hinnant, mclow.lists, #libc

Reviewed By: ldionne, #libc

Subscribers: dexonsmith, jfb, broadwaylamb, libcxx-commits

Tags: #libc

Differential Revision: https://reviews.llvm.org/D79406
This commit is contained in:
David Zarzycki 2020-05-09 11:10:41 -04:00
parent 0b9783350b
commit 4f4ce13944
3 changed files with 106 additions and 110 deletions

View File

@ -9,8 +9,6 @@
// UNSUPPORTED: libcpp-has-no-threads // UNSUPPORTED: libcpp-has-no-threads
// UNSUPPORTED: c++98, c++03, c++11 // UNSUPPORTED: c++98, c++03, c++11
// ALLOW_RETRIES: 2
// shared_timed_mutex was introduced in macosx10.12 // shared_timed_mutex was introduced in macosx10.12
// UNSUPPORTED: with_system_cxx_lib=macosx10.11 // UNSUPPORTED: with_system_cxx_lib=macosx10.11
// UNSUPPORTED: with_system_cxx_lib=macosx10.10 // UNSUPPORTED: with_system_cxx_lib=macosx10.10
@ -37,36 +35,32 @@ typedef Clock::duration duration;
typedef std::chrono::milliseconds ms; typedef std::chrono::milliseconds ms;
typedef std::chrono::nanoseconds ns; typedef std::chrono::nanoseconds ns;
std::atomic<bool> ready(false);
time_point start;
ms WaitTime = ms(250); ms WaitTime = ms(250);
// Thread sanitizer causes more overhead and will sometimes cause this test
// to fail. To prevent this we give Thread sanitizer more time to complete the
// test.
#if !TEST_HAS_FEATURE(thread_sanitizer)
ms Tolerance = ms(50);
#else
ms Tolerance = ms(100);
#endif
void f() void f()
{ {
time_point t0 = Clock::now(); ready.store(true);
m.lock(); m.lock();
time_point t1 = Clock::now(); time_point t0 = start;
m.unlock(); time_point t1 = Clock::now();
ns d = t1 - t0 - ms(250); m.unlock();
assert(d < ms(50)); // within 50ms assert(t0.time_since_epoch() > ms(0));
assert(t1 - t0 >= WaitTime);
} }
int main(int, char**) int main(int, char**)
{ {
m.lock(); m.lock();
std::thread t(f); std::thread t(f);
std::this_thread::sleep_for(ms(250)); while (!ready)
m.unlock(); std::this_thread::yield();
t.join(); start = Clock::now();
std::this_thread::sleep_for(WaitTime);
m.unlock();
t.join();
return 0; return 0;
} }

View File

@ -9,8 +9,6 @@
// UNSUPPORTED: libcpp-has-no-threads // UNSUPPORTED: libcpp-has-no-threads
// UNSUPPORTED: c++98, c++03, c++11 // UNSUPPORTED: c++98, c++03, c++11
// ALLOW_RETRIES: 2
// shared_timed_mutex was introduced in macosx10.12 // shared_timed_mutex was introduced in macosx10.12
// UNSUPPORTED: with_system_cxx_lib=macosx10.11 // UNSUPPORTED: with_system_cxx_lib=macosx10.11
// UNSUPPORTED: with_system_cxx_lib=macosx10.10 // UNSUPPORTED: with_system_cxx_lib=macosx10.10
@ -38,59 +36,68 @@ typedef Clock::duration duration;
typedef std::chrono::milliseconds ms; typedef std::chrono::milliseconds ms;
typedef std::chrono::nanoseconds ns; typedef std::chrono::nanoseconds ns;
std::atomic<unsigned> countDown;
time_point readerStart; // Protected by the above mutex 'm'
time_point writerStart; // Protected by the above mutex 'm'
ms WaitTime = ms(250); ms WaitTime = ms(250);
// Thread sanitizer causes more overhead and will sometimes cause this test void readerMustWait() {
// to fail. To prevent this we give Thread sanitizer more time to complete the --countDown;
// test. m.lock_shared();
#if !defined(TEST_HAS_SANITIZERS) time_point t1 = Clock::now();
ms Tolerance = ms(50); time_point t0 = readerStart;
#else m.unlock_shared();
ms Tolerance = ms(50 * 5); assert(t0.time_since_epoch() > ms(0));
#endif assert(t1 - t0 >= WaitTime);
void f()
{
time_point t0 = Clock::now();
m.lock_shared();
time_point t1 = Clock::now();
m.unlock_shared();
ns d = t1 - t0 - WaitTime;
assert(d < Tolerance); // within tolerance
} }
void g() void reader() {
{ --countDown;
time_point t0 = Clock::now(); m.lock_shared();
m.lock_shared(); m.unlock_shared();
time_point t1 = Clock::now();
m.unlock_shared();
ns d = t1 - t0;
assert(d < Tolerance); // within tolerance
} }
void writerMustWait() {
--countDown;
m.lock();
time_point t1 = Clock::now();
time_point t0 = writerStart;
m.unlock();
assert(t0.time_since_epoch() > ms(0));
assert(t1 - t0 >= WaitTime);
}
int main(int, char**) int main(int, char**)
{ {
m.lock(); int threads = 5;
std::vector<std::thread> v;
for (int i = 0; i < 5; ++i) countDown.store(threads);
v.push_back(std::thread(f)); m.lock();
std::this_thread::sleep_for(WaitTime); std::vector<std::thread> v;
m.unlock(); for (int i = 0; i < threads; ++i)
for (auto& t : v) v.push_back(std::thread(readerMustWait));
t.join(); while (countDown > 0)
m.lock_shared(); std::this_thread::yield();
for (auto& t : v) readerStart = Clock::now();
t = std::thread(g); std::this_thread::sleep_for(WaitTime);
std::thread q(f); m.unlock();
std::this_thread::sleep_for(WaitTime); for (auto& t : v)
m.unlock_shared(); t.join();
for (auto& t : v)
t.join(); countDown.store(threads + 1);
q.join(); m.lock_shared();
for (auto& t : v)
t = std::thread(reader);
std::thread q(writerMustWait);
while (countDown > 0)
std::this_thread::yield();
writerStart = Clock::now();
std::this_thread::sleep_for(WaitTime);
m.unlock_shared();
for (auto& t : v)
t.join();
q.join();
return 0; return 0;
} }

View File

@ -9,8 +9,6 @@
// UNSUPPORTED: libcpp-has-no-threads // UNSUPPORTED: libcpp-has-no-threads
// UNSUPPORTED: c++98, c++03, c++11 // UNSUPPORTED: c++98, c++03, c++11
// ALLOW_RETRIES: 2
// shared_timed_mutex was introduced in macosx10.12 // shared_timed_mutex was introduced in macosx10.12
// UNSUPPORTED: with_system_cxx_lib=macosx10.11 // UNSUPPORTED: with_system_cxx_lib=macosx10.11
// UNSUPPORTED: with_system_cxx_lib=macosx10.10 // UNSUPPORTED: with_system_cxx_lib=macosx10.10
@ -39,58 +37,55 @@ typedef Clock::duration duration;
typedef std::chrono::milliseconds ms; typedef std::chrono::milliseconds ms;
typedef std::chrono::nanoseconds ns; typedef std::chrono::nanoseconds ns;
ms WaitTime = ms(250); ms SuccessWaitTime = ms(5000); // Some machines are busy or slow or both
ms FailureWaitTime = ms(50);
// Thread sanitizer causes more overhead and will sometimes cause this test // On busy or slow machines, there can be a significant delay between thread
// to fail. To prevent this we give Thread sanitizer more time to complete the // creation and thread start, so we use an atomic variable to signal that the
// test. // thread is actually executing.
#if !defined(TEST_HAS_SANITIZERS) static std::atomic<unsigned> countDown;
ms Tolerance = ms(50);
#else
ms Tolerance = ms(50 * 5);
#endif
void f1() void f1()
{ {
time_point t0 = Clock::now(); --countDown;
assert(m.try_lock_shared_until(Clock::now() + WaitTime + Tolerance) == true); time_point t0 = Clock::now();
time_point t1 = Clock::now(); assert(m.try_lock_shared_until(Clock::now() + SuccessWaitTime) == true);
m.unlock_shared(); time_point t1 = Clock::now();
ns d = t1 - t0 - WaitTime; m.unlock_shared();
assert(d < Tolerance); // within 50ms assert(t1 - t0 <= SuccessWaitTime);
} }
void f2() void f2()
{ {
time_point t0 = Clock::now(); time_point t0 = Clock::now();
assert(m.try_lock_shared_until(Clock::now() + WaitTime) == false); assert(m.try_lock_shared_until(Clock::now() + FailureWaitTime) == false);
time_point t1 = Clock::now(); assert(Clock::now() - t0 >= FailureWaitTime);
ns d = t1 - t0 - WaitTime;
assert(d < Tolerance); // within tolerance
} }
int main(int, char**) int main(int, char**)
{ {
{ int threads = 5;
m.lock(); {
std::vector<std::thread> v; countDown.store(threads);
for (int i = 0; i < 5; ++i) m.lock();
v.push_back(std::thread(f1)); std::vector<std::thread> v;
std::this_thread::sleep_for(WaitTime); for (int i = 0; i < threads; ++i)
m.unlock(); v.push_back(std::thread(f1));
for (auto& t : v) while (countDown > 0)
t.join(); std::this_thread::yield();
} m.unlock();
{ for (auto& t : v)
m.lock(); t.join();
std::vector<std::thread> v; }
for (int i = 0; i < 5; ++i) {
v.push_back(std::thread(f2)); m.lock();
std::this_thread::sleep_for(WaitTime + Tolerance); std::vector<std::thread> v;
m.unlock(); for (int i = 0; i < threads; ++i)
for (auto& t : v) v.push_back(std::thread(f2));
t.join(); for (auto& t : v)
} t.join();
m.unlock();
}
return 0; return 0;
} }