Fix a race condition in FuncUnwinders where the mutex was being
acquired only after checking whether the ivar shared pointer was
already filled in.  But when I assign an UnwindPlan object to the
shared pointer, I assign an empty object first and then fill it in.
That leaves a window where another thread could get the shared
pointer to the empty (but quickly being-filled-in) object, leading
to a crash.
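
To illustrate the pattern being fixed, here is a minimal sketch of such an
accessor before and after the change -- hypothetical class and member names,
not the actual FuncUnwinders code:

#include <memory>
#include <mutex>

struct UnwindPlan { /* rows, registers, source info, ... */ };

// Illustrative cache only; not the real FuncUnwinders API.
class CachedPlan {
public:
  std::shared_ptr<UnwindPlan> Get() {
    // Racy version: checking m_plan_sp before locking lets another thread
    // see the pointer after it has been assigned but before the UnwindPlan
    // it points to has been filled in:
    //
    //   if (m_plan_sp)
    //     return m_plan_sp;
    //   std::lock_guard<std::recursive_mutex> guard(m_mutex);
    //   ...
    //
    // Fixed version: take the lock before inspecting the cached pointer, so
    // the pointer is only published, and only read, with the plan complete.
    std::lock_guard<std::recursive_mutex> guard(m_mutex);
    if (m_plan_sp)
      return m_plan_sp;
    m_plan_sp = std::make_shared<UnwindPlan>();
    // ... fill in *m_plan_sp while still holding the lock ...
    return m_plan_sp;
  }

private:
  std::recursive_mutex m_mutex;
  std::shared_ptr<UnwindPlan> m_plan_sp;
};

Hoisting the guard above the cached-pointer check is exactly the reordering
applied to each accessor in the FuncUnwinders hunks below.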

Also includes two changes from Greg for correctness in the
TestMultipleDebuggers test case.

<rdar://problem/30564102> 

llvm-svn: 296084
Jason Molenda 2017-02-24 03:35:46 +00:00
parent 9a2bba7251
commit 1e7ecd3e64
4 changed files with 16 additions and 10 deletions

@@ -1029,7 +1029,7 @@ for this region.
// region what contains data mapped from the specified file.
//
// RESPONSE
-// <unsinged-hex64> - Load address of the file in big endian encoding
+// <unsigned-hex64> - Load address of the file in big endian encoding
// "E01" - the requested file isn't loaded
// "EXX" - for any other errors
//

@@ -6,6 +6,7 @@ from __future__ import print_function
import os
import re
import subprocess
+import sys
import lldb
from lldbsuite.test.decorators import *
@@ -38,6 +39,7 @@ class TestMultipleSimultaneousDebuggers(TestBase):
self.inferior_exe = os.path.join(os.getcwd(), "testprog")
self.buildDriver('testprog.cpp', self.inferior_exe)
self.addTearDownHook(lambda: os.remove(self.inferior_exe))
+sys.exit()
# check_call will raise a CalledProcessError if multi-process-driver doesn't return
# exit code 0 to indicate success. We can let this exception go - the test harness

@@ -217,6 +217,10 @@ void *do_one_debugger (void *in)
int main (int argc, char **argv)
{
+#if !defined(_MSC_VER)
+signal(SIGPIPE, SIG_IGN);
+#endif
SBDebugger::Initialize();
completed_threads_array = (bool *) malloc (sizeof (bool) * NUMBER_OF_SIMULTANEOUS_DEBUG_SESSIONS);

@@ -73,13 +73,13 @@ UnwindPlanSP FuncUnwinders::GetUnwindPlanAtCallSite(Target &target,
UnwindPlanSP FuncUnwinders::GetCompactUnwindUnwindPlan(Target &target,
int current_offset) {
+std::lock_guard<std::recursive_mutex> guard(m_mutex);
if (m_unwind_plan_compact_unwind.size() > 0)
return m_unwind_plan_compact_unwind[0]; // FIXME support multiple compact
// unwind plans for one func
if (m_tried_unwind_plan_compact_unwind)
return UnwindPlanSP();
-std::lock_guard<std::recursive_mutex> guard(m_mutex);
m_tried_unwind_plan_compact_unwind = true;
if (m_range.GetBaseAddress().IsValid()) {
Address current_pc(m_range.GetBaseAddress());
@@ -101,10 +101,10 @@ UnwindPlanSP FuncUnwinders::GetCompactUnwindUnwindPlan(Target &target,
UnwindPlanSP FuncUnwinders::GetEHFrameUnwindPlan(Target &target,
int current_offset) {
+std::lock_guard<std::recursive_mutex> guard(m_mutex);
if (m_unwind_plan_eh_frame_sp.get() || m_tried_unwind_plan_eh_frame)
return m_unwind_plan_eh_frame_sp;
-std::lock_guard<std::recursive_mutex> guard(m_mutex);
m_tried_unwind_plan_eh_frame = true;
if (m_range.GetBaseAddress().IsValid()) {
Address current_pc(m_range.GetBaseAddress());
@@ -123,10 +123,10 @@ UnwindPlanSP FuncUnwinders::GetEHFrameUnwindPlan(Target &target,
UnwindPlanSP FuncUnwinders::GetArmUnwindUnwindPlan(Target &target,
int current_offset) {
+std::lock_guard<std::recursive_mutex> guard(m_mutex);
if (m_unwind_plan_arm_unwind_sp.get() || m_tried_unwind_plan_arm_unwind)
return m_unwind_plan_arm_unwind_sp;
-std::lock_guard<std::recursive_mutex> guard(m_mutex);
m_tried_unwind_plan_arm_unwind = true;
if (m_range.GetBaseAddress().IsValid()) {
Address current_pc(m_range.GetBaseAddress());
@@ -147,6 +147,7 @@ UnwindPlanSP FuncUnwinders::GetArmUnwindUnwindPlan(Target &target,
UnwindPlanSP FuncUnwinders::GetEHFrameAugmentedUnwindPlan(Target &target,
Thread &thread,
int current_offset) {
+std::lock_guard<std::recursive_mutex> guard(m_mutex);
if (m_unwind_plan_eh_frame_augmented_sp.get() ||
m_tried_unwind_plan_eh_frame_augmented)
return m_unwind_plan_eh_frame_augmented_sp;
@@ -162,7 +163,6 @@ UnwindPlanSP FuncUnwinders::GetEHFrameAugmentedUnwindPlan(Target &target,
return m_unwind_plan_eh_frame_augmented_sp;
}
-std::lock_guard<std::recursive_mutex> guard(m_mutex);
m_tried_unwind_plan_eh_frame_augmented = true;
UnwindPlanSP eh_frame_plan = GetEHFrameUnwindPlan(target, current_offset);
@@ -190,12 +190,12 @@ UnwindPlanSP FuncUnwinders::GetEHFrameAugmentedUnwindPlan(Target &target,
UnwindPlanSP FuncUnwinders::GetAssemblyUnwindPlan(Target &target,
Thread &thread,
int current_offset) {
+std::lock_guard<std::recursive_mutex> guard(m_mutex);
if (m_unwind_plan_assembly_sp.get() || m_tried_unwind_plan_assembly ||
m_unwind_table.GetAllowAssemblyEmulationUnwindPlans() == false) {
return m_unwind_plan_assembly_sp;
}
-std::lock_guard<std::recursive_mutex> guard(m_mutex);
m_tried_unwind_plan_assembly = true;
UnwindAssemblySP assembly_profiler_sp(GetUnwindAssemblyProfiler(target));
@@ -298,10 +298,10 @@ UnwindPlanSP FuncUnwinders::GetUnwindPlanAtNonCallSite(Target &target,
UnwindPlanSP FuncUnwinders::GetUnwindPlanFastUnwind(Target &target,
Thread &thread) {
+std::lock_guard<std::recursive_mutex> guard(m_mutex);
if (m_unwind_plan_fast_sp.get() || m_tried_unwind_fast)
return m_unwind_plan_fast_sp;
-std::lock_guard<std::recursive_mutex> guard(m_mutex);
m_tried_unwind_fast = true;
UnwindAssemblySP assembly_profiler_sp(GetUnwindAssemblyProfiler(target));
@@ -316,10 +316,10 @@ UnwindPlanSP FuncUnwinders::GetUnwindPlanFastUnwind(Target &target,
}
UnwindPlanSP FuncUnwinders::GetUnwindPlanArchitectureDefault(Thread &thread) {
+std::lock_guard<std::recursive_mutex> guard(m_mutex);
if (m_unwind_plan_arch_default_sp.get() || m_tried_unwind_arch_default)
return m_unwind_plan_arch_default_sp;
-std::lock_guard<std::recursive_mutex> guard(m_mutex);
m_tried_unwind_arch_default = true;
Address current_pc;
@@ -340,11 +340,11 @@ UnwindPlanSP FuncUnwinders::GetUnwindPlanArchitectureDefault(Thread &thread) {
UnwindPlanSP
FuncUnwinders::GetUnwindPlanArchitectureDefaultAtFunctionEntry(Thread &thread) {
+std::lock_guard<std::recursive_mutex> guard(m_mutex);
if (m_unwind_plan_arch_default_at_func_entry_sp.get() ||
m_tried_unwind_arch_default_at_func_entry)
return m_unwind_plan_arch_default_at_func_entry_sp;
-std::lock_guard<std::recursive_mutex> guard(m_mutex);
m_tried_unwind_arch_default_at_func_entry = true;
Address current_pc;
@@ -365,10 +365,10 @@ FuncUnwinders::GetUnwindPlanArchitectureDefaultAtFunctionEntry(Thread &thread) {
}
Address &FuncUnwinders::GetFirstNonPrologueInsn(Target &target) {
+std::lock_guard<std::recursive_mutex> guard(m_mutex);
if (m_first_non_prologue_insn.IsValid())
return m_first_non_prologue_insn;
-std::lock_guard<std::recursive_mutex> guard(m_mutex);
ExecutionContext exe_ctx(target.shared_from_this(), false);
UnwindAssemblySP assembly_profiler_sp(GetUnwindAssemblyProfiler(target));
if (assembly_profiler_sp)