[libcxx testing] Make three locking tests more reliable

The challenge with measuring time in tests is that slow and/or busy
machines can cause tests to fail in unexpected ways. After this change,
three tests should be much more robust. The only remaining and tiny race
that I can think of is preemption after `--countDown`. That being said,
the race isn't fixable because the standard library doesn't provide a
way to count threads that are waiting to acquire a lock.

Reviewers: ldionne, EricWF, howard.hinnant, mclow.lists, #libc

Reviewed By: ldionne, #libc

Subscribers: dexonsmith, jfb, broadwaylamb, libcxx-commits

Tags: #libc

Differential Revision: https://reviews.llvm.org/D79406
This commit is contained in:
David Zarzycki 2020-05-09 11:10:41 -04:00
parent 0b9783350b
commit 4f4ce13944
3 changed files with 106 additions and 110 deletions

View File

@@ -9,8 +9,6 @@
// UNSUPPORTED: libcpp-has-no-threads
// UNSUPPORTED: c++98, c++03, c++11
// ALLOW_RETRIES: 2
// shared_timed_mutex was introduced in macosx10.12
// UNSUPPORTED: with_system_cxx_lib=macosx10.11
// UNSUPPORTED: with_system_cxx_lib=macosx10.10
@@ -37,36 +35,32 @@ typedef Clock::duration duration;
typedef std::chrono::milliseconds ms;
typedef std::chrono::nanoseconds ns;

std::atomic<bool> ready(false); // set by the child thread once it is actually running
time_point start;               // written by main() while it still holds 'm'

ms WaitTime = ms(250); // how long main() keeps the lock after recording 'start'
// Signal main() that this thread is running, then block acquiring the mutex
// main() holds. After acquiring it, verify that at least WaitTime elapsed
// since main() recorded 'start' — i.e. that we really blocked on the lock.
void f()
{
  ready.store(true);
  m.lock();
  time_point t0 = start;
  time_point t1 = Clock::now();
  m.unlock();
  assert(t0.time_since_epoch() > ms(0)); // 'start' was published before unlock
  assert(t1 - t0 >= WaitTime);           // we waited at least the hold time
}
int main(int, char**)
{
  // Hold the lock, start the contending thread, and begin timing only once
  // the thread has signaled via 'ready' that it is actually executing. This
  // avoids spurious failures on slow or busy machines where thread startup
  // can be delayed arbitrarily.
  m.lock();
  std::thread t(f);
  while (!ready)
    std::this_thread::yield();
  start = Clock::now();
  std::this_thread::sleep_for(WaitTime);
  m.unlock();
  t.join();

  return 0;
}

View File

@@ -9,8 +9,6 @@
// UNSUPPORTED: libcpp-has-no-threads
// UNSUPPORTED: c++98, c++03, c++11
// ALLOW_RETRIES: 2
// shared_timed_mutex was introduced in macosx10.12
// UNSUPPORTED: with_system_cxx_lib=macosx10.11
// UNSUPPORTED: with_system_cxx_lib=macosx10.10
@@ -38,59 +36,68 @@ typedef Clock::duration duration;
typedef std::chrono::milliseconds ms;
typedef std::chrono::nanoseconds ns;

std::atomic<unsigned> countDown; // number of threads that have not yet started running
time_point readerStart; // Protected by the above mutex 'm'
time_point writerStart; // Protected by the above mutex 'm'

ms WaitTime = ms(250); // how long main() holds the lock after the start time is recorded
void f()
{
time_point t0 = Clock::now();
m.lock_shared();
time_point t1 = Clock::now();
m.unlock_shared();
ns d = t1 - t0 - WaitTime;
assert(d < Tolerance); // within tolerance
void readerMustWait() {
--countDown;
m.lock_shared();
time_point t1 = Clock::now();
time_point t0 = readerStart;
m.unlock_shared();
assert(t0.time_since_epoch() > ms(0));
assert(t1 - t0 >= WaitTime);
}
void g()
{
time_point t0 = Clock::now();
m.lock_shared();
time_point t1 = Clock::now();
m.unlock_shared();
ns d = t1 - t0;
assert(d < Tolerance); // within tolerance
void reader() {
--countDown;
m.lock_shared();
m.unlock_shared();
}
void writerMustWait() {
--countDown;
m.lock();
time_point t1 = Clock::now();
time_point t0 = writerStart;
m.unlock();
assert(t0.time_since_epoch() > ms(0));
assert(t1 - t0 >= WaitTime);
}
int main(int, char**)
{
  int threads = 5;

  // Phase 1: readers must block while main() holds the exclusive lock.
  // Only record the start time once every thread has reported in via
  // 'countDown', so slow thread startup cannot skew the measurement.
  countDown.store(threads);
  m.lock();
  std::vector<std::thread> v;
  for (int i = 0; i < threads; ++i)
    v.push_back(std::thread(readerMustWait));
  while (countDown > 0)
    std::this_thread::yield();
  readerStart = Clock::now();
  std::this_thread::sleep_for(WaitTime);
  m.unlock();
  for (auto& t : v)
    t.join();

  // Phase 2: readers may share the lock concurrently, but the one writer
  // must wait until main() drops its shared lock.
  countDown.store(threads + 1);
  m.lock_shared();
  for (auto& t : v)
    t = std::thread(reader);
  std::thread q(writerMustWait);
  while (countDown > 0)
    std::this_thread::yield();
  writerStart = Clock::now();
  std::this_thread::sleep_for(WaitTime);
  m.unlock_shared();
  for (auto& t : v)
    t.join();
  q.join();

  return 0;
}

View File

@@ -9,8 +9,6 @@
// UNSUPPORTED: libcpp-has-no-threads
// UNSUPPORTED: c++98, c++03, c++11
// ALLOW_RETRIES: 2
// shared_timed_mutex was introduced in macosx10.12
// UNSUPPORTED: with_system_cxx_lib=macosx10.11
// UNSUPPORTED: with_system_cxx_lib=macosx10.10
@@ -39,58 +37,55 @@ typedef Clock::duration duration;
typedef std::chrono::milliseconds ms;
typedef std::chrono::nanoseconds ns;

ms SuccessWaitTime = ms(5000); // Some machines are busy or slow or both
ms FailureWaitTime = ms(50);

// On busy or slow machines, there can be a significant delay between thread
// creation and thread start, so we use an atomic variable to signal that the
// thread is actually executing.
static std::atomic<unsigned> countDown;
// Success case: once main() releases the exclusive lock,
// try_lock_shared_until() must succeed before the (deliberately generous)
// SuccessWaitTime deadline, even on busy or slow machines.
void f1()
{
  --countDown; // signal main() that this thread is actually executing
  time_point t0 = Clock::now();
  assert(m.try_lock_shared_until(Clock::now() + SuccessWaitTime) == true);
  time_point t1 = Clock::now();
  m.unlock_shared();
  assert(t1 - t0 <= SuccessWaitTime); // acquired within the deadline
}
// Failure case: main() holds the exclusive lock for the thread's entire
// lifetime, so try_lock_shared_until() must time out — and must not return
// before the requested deadline.
void f2()
{
  time_point t0 = Clock::now();
  assert(m.try_lock_shared_until(Clock::now() + FailureWaitTime) == false);
  assert(Clock::now() - t0 >= FailureWaitTime); // waited at least the deadline
}
int main(int, char**)
{
  int threads = 5;

  // Success case: hold the exclusive lock only until every f1 thread has
  // reported in via 'countDown', then release it so each
  // try_lock_shared_until() call succeeds.
  {
    countDown.store(threads);
    m.lock();
    std::vector<std::thread> v;
    for (int i = 0; i < threads; ++i)
      v.push_back(std::thread(f1));
    while (countDown > 0)
      std::this_thread::yield();
    m.unlock();
    for (auto& t : v)
      t.join();
  }

  // Failure case: keep the exclusive lock until every f2 thread has timed
  // out and exited, so no try_lock_shared_until() call can succeed.
  {
    m.lock();
    std::vector<std::thread> v;
    for (int i = 0; i < threads; ++i)
      v.push_back(std::thread(f2));
    for (auto& t : v)
      t.join();
    m.unlock();
  }

  return 0;
}