Refactor MemoryMappingLayout::Next to use a single struct instead of output parameters. NFC.

Summary:
This is the first in a series of patches to refactor sanitizer_procmaps
so that Mach-O section information can be exposed on Darwin.

In addition, grouping all segment information in a single struct is
cleaner than passing it through a large set of output parameters, and
avoids the need to annotate NULL arguments for unneeded information.

The filename string is optional and must be managed and supplied by the
calling function. This allows the MemoryMappedSegment struct to be
stored on the stack without causing overly large stack sizes.

Reviewers: alekseyshl, kubamracek, glider

Subscribers: emaste, llvm-commits

Differential Revision: https://reviews.llvm.org/D35135

llvm-svn: 307688
Francis Ricci 2017-07-11 18:54:00 +00:00
parent 368d8a7351
commit f6a4329b7d
17 changed files with 165 additions and 216 deletions
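
For illustration, the new iteration pattern introduced by this commit looks
roughly like the following (a minimal sketch based on the declarations in
this patch; the buffer and its size are hypothetical):

  // The filename buffer is optional and owned by the caller; omitting it
  // simply skips filename extraction for each segment.
  char filename[512];  // hypothetical buffer size for the example
  MemoryMappedSegment segment(filename, sizeof(filename));
  MemoryMappingLayout proc_maps(/*cache_enabled*/ true);
  while (proc_maps.Next(&segment)) {
    if (segment.IsExecutable())
      Printf("%p-%p %s\n", (void *)segment.start, (void *)segment.end,
             segment.filename);
  }

This replaces the old Next() call, which took a long list of output
parameters and required NULL annotations for any that were not needed.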


@@ -61,10 +61,9 @@ static void MaybeDumpRegisters(void *context) {
 static void MaybeReportNonExecRegion(uptr pc) {
 #if SANITIZER_FREEBSD || SANITIZER_LINUX
   MemoryMappingLayout proc_maps(/*cache_enabled*/ true);
-  uptr start, end, protection;
-  while (proc_maps.Next(&start, &end, nullptr, nullptr, 0, &protection)) {
-    if (pc >= start && pc < end &&
-        !(protection & MemoryMappingLayout::kProtectionExecute))
+  MemoryMappedSegment segment;
+  while (proc_maps.Next(&segment)) {
+    if (pc >= segment.start && pc < segment.end && !segment.IsExecutable())
       Report("Hint: PC is at a non-executable region. Maybe a wild jump?\n");
   }
 #endif


@@ -140,9 +140,9 @@ void AsanCheckIncompatibleRT() {
   // system libraries, causing crashes later in ASan initialization.
   MemoryMappingLayout proc_maps(/*cache_enabled*/true);
   char filename[128];
-  while (proc_maps.Next(nullptr, nullptr, nullptr, filename,
-                        sizeof(filename), nullptr)) {
-    if (IsDynamicRTName(filename)) {
+  MemoryMappedSegment segment(filename, sizeof(filename));
+  while (proc_maps.Next(&segment)) {
+    if (IsDynamicRTName(segment.filename)) {
       Report("Your application is linked against "
              "incompatible ASan runtimes.\n");
       Die();


@@ -160,15 +160,16 @@ static u32 countAndClearShadowValues(u32 BitIdx, uptr ShadowStart,
 static u32 computeWorkingSizeAndReset(u32 BitIdx) {
   u32 WorkingSetSize = 0;
   MemoryMappingLayout MemIter(true/*cache*/);
-  uptr Start, End, Prot;
-  while (MemIter.Next(&Start, &End, nullptr/*offs*/, nullptr/*file*/,
-                      0/*file size*/, &Prot)) {
-    VPrintf(4, "%s: considering %p-%p app=%d shadow=%d prot=%u\n",
-            __FUNCTION__, Start, End, Prot, isAppMem(Start),
-            isShadowMem(Start));
-    if (isShadowMem(Start) && (Prot & MemoryMappingLayout::kProtectionWrite)) {
-      VPrintf(3, "%s: walking %p-%p\n", __FUNCTION__, Start, End);
-      WorkingSetSize += countAndClearShadowValues(BitIdx, Start, End);
+  MemoryMappedSegment Segment;
+  while (MemIter.Next(&Segment)) {
+    VPrintf(4, "%s: considering %p-%p app=%d shadow=%d prot=%u\n", __FUNCTION__,
+            Segment.start, Segment.end, Segment.protection,
+            isAppMem(Segment.start), isShadowMem(Segment.start));
+    if (isShadowMem(Segment.start) && Segment.IsWritable()) {
+      VPrintf(3, "%s: walking %p-%p\n", __FUNCTION__, Segment.start,
+              Segment.end);
+      WorkingSetSize +=
+          countAndClearShadowValues(BitIdx, Segment.start, Segment.end);
     }
   }
   return WorkingSetSize;


@@ -305,11 +305,10 @@ static void ProcessThreads(SuspendedThreadsList const &suspended_threads,
 }
 
 void ScanRootRegion(Frontier *frontier, const RootRegion &root_region,
-                    uptr region_begin, uptr region_end, uptr prot) {
+                    uptr region_begin, uptr region_end, bool is_readable) {
   uptr intersection_begin = Max(root_region.begin, region_begin);
   uptr intersection_end = Min(region_end, root_region.begin + root_region.size);
   if (intersection_begin >= intersection_end) return;
-  bool is_readable = prot & MemoryMappingLayout::kProtectionRead;
   LOG_POINTERS("Root region %p-%p intersects with mapped region %p-%p (%s)\n",
                root_region.begin, root_region.begin + root_region.size,
                region_begin, region_end,
@@ -322,11 +321,10 @@ void ScanRootRegion(Frontier *frontier, const RootRegion &root_region,
 
 static void ProcessRootRegion(Frontier *frontier,
                               const RootRegion &root_region) {
   MemoryMappingLayout proc_maps(/*cache_enabled*/ true);
-  uptr begin, end, prot;
-  while (proc_maps.Next(&begin, &end,
-                        /*offset*/ nullptr, /*filename*/ nullptr,
-                        /*filename_size*/ 0, &prot)) {
-    ScanRootRegion(frontier, root_region, begin, end, prot);
+  MemoryMappedSegment segment;
+  while (proc_maps.Next(&segment)) {
+    ScanRootRegion(frontier, root_region, segment.start, segment.end,
+                   segment.IsReadable());
   }
 }


@@ -127,7 +127,7 @@ struct RootRegion {
 
 InternalMmapVector<RootRegion> const *GetRootRegions();
 void ScanRootRegion(Frontier *frontier, RootRegion const &region,
-                    uptr region_begin, uptr region_end, uptr prot);
+                    uptr region_begin, uptr region_end, bool is_readable);
 // Run stoptheworld while holding any platform-specific locks.
 void DoStopTheWorld(StopTheWorldCallback callback, void* argument);


@@ -156,7 +156,7 @@ void ProcessPlatformSpecificAllocations(Frontier *frontier) {
   if (flags()->use_root_regions) {
     for (uptr i = 0; i < root_regions->size(); i++) {
       ScanRootRegion(frontier, (*root_regions)[i], address, end_address,
-                     info.protection);
+                     info.protection & kProtectionRead);
     }
   }


@@ -830,13 +830,9 @@ static uptr GetKernelAreaSize() {
   // Firstly check if there are writable segments
   // mapped to top gigabyte (e.g. stack).
   MemoryMappingLayout proc_maps(/*cache_enabled*/true);
-  uptr end, prot;
-  while (proc_maps.Next(/*start*/nullptr, &end,
-                        /*offset*/nullptr, /*filename*/nullptr,
-                        /*filename_size*/0, &prot)) {
-    if ((end >= 3 * gbyte)
-        && (prot & MemoryMappingLayout::kProtectionWrite) != 0)
-      return 0;
+  MemoryMappedSegment segment;
+  while (proc_maps.Next(&segment)) {
+    if ((segment.end >= 3 * gbyte) && segment.IsWritable()) return 0;
   }
 
 #if !SANITIZER_ANDROID


@@ -81,28 +81,25 @@ void GetThreadStackTopAndBottom(bool at_initialization, uptr *stack_top,
     // Find the mapping that contains a stack variable.
     MemoryMappingLayout proc_maps(/*cache_enabled*/true);
-    uptr start, end, offset;
+    MemoryMappedSegment segment;
     uptr prev_end = 0;
-    while (proc_maps.Next(&start, &end, &offset, nullptr, 0,
-                          /* protection */nullptr)) {
-      if ((uptr)&rl < end)
-        break;
-      prev_end = end;
+    while (proc_maps.Next(&segment)) {
+      if ((uptr)&rl < segment.end) break;
+      prev_end = segment.end;
     }
-    CHECK((uptr)&rl >= start && (uptr)&rl < end);
+    CHECK((uptr)&rl >= segment.start && (uptr)&rl < segment.end);
 
     // Get stacksize from rlimit, but clip it so that it does not overlap
     // with other mappings.
     uptr stacksize = rl.rlim_cur;
-    if (stacksize > end - prev_end)
-      stacksize = end - prev_end;
+    if (stacksize > segment.end - prev_end) stacksize = segment.end - prev_end;
     // When running with unlimited stack size, we still want to set some limit.
     // The unlimited stack size is caused by 'ulimit -s unlimited'.
     // Also, for some reason, GNU make spawns subprocesses with unlimited stack.
     if (stacksize > kMaxThreadStackSize)
       stacksize = kMaxThreadStackSize;
-    *stack_top = end;
-    *stack_bottom = end - stacksize;
+    *stack_top = segment.end;
+    *stack_bottom = segment.end - stacksize;
     return;
   }
   pthread_attr_t attr;


@@ -231,13 +231,12 @@ static inline bool IntervalsAreSeparate(uptr start1, uptr end1,
 // memory).
 bool MemoryRangeIsAvailable(uptr range_start, uptr range_end) {
   MemoryMappingLayout proc_maps(/*cache_enabled*/true);
-  uptr start, end;
-  while (proc_maps.Next(&start, &end,
-                        /*offset*/nullptr, /*filename*/nullptr,
-                        /*filename_size*/0, /*protection*/nullptr)) {
-    if (start == end) continue;  // Empty range.
-    CHECK_NE(0, end);
-    if (!IntervalsAreSeparate(start, end - 1, range_start, range_end))
+  MemoryMappedSegment segment;
+  while (proc_maps.Next(&segment)) {
+    if (segment.start == segment.end) continue;  // Empty range.
+    CHECK_NE(0, segment.end);
+    if (!IntervalsAreSeparate(segment.start, segment.end - 1, range_start,
+                              range_end))
       return false;
   }
   return true;
@@ -245,13 +244,13 @@ bool MemoryRangeIsAvailable(uptr range_start, uptr range_end) {
 
 void DumpProcessMap() {
   MemoryMappingLayout proc_maps(/*cache_enabled*/true);
-  uptr start, end;
   const sptr kBufSize = 4095;
   char *filename = (char*)MmapOrDie(kBufSize, __func__);
+  MemoryMappedSegment segment(filename, kBufSize);
   Report("Process memory map follows:\n");
-  while (proc_maps.Next(&start, &end, /* file_offset */nullptr,
-                        filename, kBufSize, /* protection */nullptr)) {
-    Printf("\t%p-%p\t%s\n", (void*)start, (void*)end, filename);
+  while (proc_maps.Next(&segment)) {
+    Printf("\t%p-%p\t%s\n", (void *)segment.start, (void *)segment.end,
+           segment.filename);
   }
   Report("End of process memory map.\n");
   UnmapOrDie(filename, kBufSize);
@@ -281,14 +280,14 @@ void ReportFile::Write(const char *buffer, uptr length) {
 }
 
 bool GetCodeRangeForFile(const char *module, uptr *start, uptr *end) {
-  uptr s, e, off, prot;
-  InternalScopedString buff(kMaxPathLength);
   MemoryMappingLayout proc_maps(/*cache_enabled*/false);
-  while (proc_maps.Next(&s, &e, &off, buff.data(), buff.size(), &prot)) {
-    if ((prot & MemoryMappingLayout::kProtectionExecute) != 0
-        && internal_strcmp(module, buff.data()) == 0) {
-      *start = s;
-      *end = e;
+  InternalScopedString buff(kMaxPathLength);
+  MemoryMappedSegment segment(buff.data(), kMaxPathLength);
+  while (proc_maps.Next(&segment)) {
+    if (segment.IsExecutable() &&
+        internal_strcmp(module, segment.filename) == 0) {
+      *start = segment.start;
+      *end = segment.end;
       return true;
     }
   }


@@ -31,13 +31,37 @@ struct ProcSelfMapsBuff {
 void ReadProcMaps(ProcSelfMapsBuff *proc_maps);
 #endif  // SANITIZER_FREEBSD || SANITIZER_LINUX
 
+// Memory protection masks.
+static const uptr kProtectionRead = 1;
+static const uptr kProtectionWrite = 2;
+static const uptr kProtectionExecute = 4;
+static const uptr kProtectionShared = 8;
+
+struct MemoryMappedSegment {
+  MemoryMappedSegment(char *buff = nullptr, uptr size = 0)
+      : filename(buff), filename_size(size) {}
+  ~MemoryMappedSegment() {}
+
+  bool IsReadable() { return protection & kProtectionRead; }
+  bool IsWritable() { return protection & kProtectionWrite; }
+  bool IsExecutable() { return protection & kProtectionExecute; }
+  bool IsShared() { return protection & kProtectionShared; }
+
+  uptr start;
+  uptr end;
+  uptr offset;
+  char *filename;  // owned by caller
+  uptr filename_size;
+  uptr protection;
+  ModuleArch arch;
+  u8 uuid[kModuleUUIDSize];
+};
+
 class MemoryMappingLayout {
  public:
   explicit MemoryMappingLayout(bool cache_enabled);
   ~MemoryMappingLayout();
-  bool Next(uptr *start, uptr *end, uptr *offset, char filename[],
-            uptr filename_size, uptr *protection, ModuleArch *arch = nullptr,
-            u8 *uuid = nullptr);
+  bool Next(MemoryMappedSegment *segment);
   void Reset();
   // In some cases, e.g. when running under a sandbox on Linux, ASan is unable
   // to obtain the memory mappings. It should fall back to pre-cached data
@@ -47,12 +71,6 @@ class MemoryMappingLayout {
   // Adds all mapped objects into a vector.
   void DumpListOfModules(InternalMmapVector<LoadedModule> *modules);
 
-  // Memory protection masks.
-  static const uptr kProtectionRead = 1;
-  static const uptr kProtectionWrite = 2;
-  static const uptr kProtectionExecute = 4;
-  static const uptr kProtectionShared = 8;
-
  private:
   void LoadFromCache();
@@ -67,10 +85,9 @@ class MemoryMappingLayout {
   static StaticSpinMutex cache_lock_;  // protects cached_proc_self_maps_.
 # elif SANITIZER_MAC
   template <u32 kLCSegment, typename SegmentCommand>
-  bool NextSegmentLoad(uptr *start, uptr *end, uptr *offset, char filename[],
-                       uptr filename_size, ModuleArch *arch, u8 *uuid,
-                       uptr *protection);
-  void GetSegmentAddrRange(uptr *start, uptr *end, uptr vmaddr, uptr vmsize);
+  bool NextSegmentLoad(MemoryMappedSegment *segment);
+  void GetSegmentAddrRange(MemoryMappedSegment *segment, uptr vmaddr,
+                           uptr vmsize);
   int current_image_;
   u32 current_magic_;
   u32 current_filetype_;


@@ -119,12 +119,10 @@ void MemoryMappingLayout::LoadFromCache() {
 void MemoryMappingLayout::DumpListOfModules(
     InternalMmapVector<LoadedModule> *modules) {
   Reset();
-  uptr cur_beg, cur_end, cur_offset, prot;
   InternalScopedString module_name(kMaxPathLength);
-  for (uptr i = 0; Next(&cur_beg, &cur_end, &cur_offset, module_name.data(),
-                        module_name.size(), &prot);
-       i++) {
-    const char *cur_name = module_name.data();
+  MemoryMappedSegment segment(module_name.data(), module_name.size());
+  for (uptr i = 0; Next(&segment); i++) {
+    const char *cur_name = segment.filename;
     if (cur_name[0] == '\0')
       continue;
     // Don't subtract 'cur_beg' from the first entry:
@@ -138,11 +136,11 @@ void MemoryMappingLayout::DumpListOfModules(
     // mapped high at address space (in particular, higher than
     // shadow memory of the tool), so the module can't be the
     // first entry.
-    uptr base_address = (i ? cur_beg : 0) - cur_offset;
+    uptr base_address = (i ? segment.start : 0) - segment.offset;
     LoadedModule cur_module;
     cur_module.set(cur_name, base_address);
-    cur_module.addAddressRange(cur_beg, cur_end, prot & kProtectionExecute,
-                               prot & kProtectionWrite);
+    cur_module.addAddressRange(segment.start, segment.end,
+                               segment.IsExecutable(), segment.IsWritable());
     modules->push_back(cur_module);
   }
 }


@@ -48,36 +48,27 @@ void ReadProcMaps(ProcSelfMapsBuff *proc_maps) {
   proc_maps->len = Size;
 }
 
-bool MemoryMappingLayout::Next(uptr *start, uptr *end, uptr *offset,
-                               char filename[], uptr filename_size,
-                               uptr *protection, ModuleArch *arch, u8 *uuid) {
-  CHECK(!arch && "not implemented");
-  CHECK(!uuid && "not implemented");
+bool MemoryMappingLayout::Next(MemoryMappedSegment *segment) {
   char *last = proc_self_maps_.data + proc_self_maps_.len;
   if (current_ >= last) return false;
-  uptr dummy;
-  if (!start) start = &dummy;
-  if (!end) end = &dummy;
-  if (!offset) offset = &dummy;
-  if (!protection) protection = &dummy;
   struct kinfo_vmentry *VmEntry = (struct kinfo_vmentry*)current_;
-  *start = (uptr)VmEntry->kve_start;
-  *end = (uptr)VmEntry->kve_end;
-  *offset = (uptr)VmEntry->kve_offset;
+  segment->start = (uptr)VmEntry->kve_start;
+  segment->end = (uptr)VmEntry->kve_end;
+  segment->offset = (uptr)VmEntry->kve_offset;
 
-  *protection = 0;
+  segment->protection = 0;
   if ((VmEntry->kve_protection & KVME_PROT_READ) != 0)
-    *protection |= kProtectionRead;
+    segment->protection |= kProtectionRead;
   if ((VmEntry->kve_protection & KVME_PROT_WRITE) != 0)
-    *protection |= kProtectionWrite;
+    segment->protection |= kProtectionWrite;
   if ((VmEntry->kve_protection & KVME_PROT_EXEC) != 0)
-    *protection |= kProtectionExecute;
+    segment->protection |= kProtectionExecute;
 
-  if (filename != NULL && filename_size > 0) {
-    internal_snprintf(filename,
-                      Min(filename_size, (uptr)PATH_MAX),
-                      "%s", VmEntry->kve_path);
+  if (segment->filename != NULL && segment->filename_size > 0) {
+    internal_snprintf(segment->filename,
+                      Min(segment->filename_size, (uptr)PATH_MAX), "%s",
+                      VmEntry->kve_path);
   }
 
   current_ += VmEntry->kve_structsize;


@@ -26,41 +26,28 @@ static bool IsOneOf(char c, char c1, char c2) {
   return c == c1 || c == c2;
 }
 
-bool MemoryMappingLayout::Next(uptr *start, uptr *end, uptr *offset,
-                               char filename[], uptr filename_size,
-                               uptr *protection, ModuleArch *arch, u8 *uuid) {
-  CHECK(!arch && "not implemented");
-  CHECK(!uuid && "not implemented");
+bool MemoryMappingLayout::Next(MemoryMappedSegment *segment) {
   char *last = proc_self_maps_.data + proc_self_maps_.len;
   if (current_ >= last) return false;
-  uptr dummy;
-  if (!start) start = &dummy;
-  if (!end) end = &dummy;
-  if (!offset) offset = &dummy;
-  if (!protection) protection = &dummy;
   char *next_line = (char*)internal_memchr(current_, '\n', last - current_);
   if (next_line == 0)
     next_line = last;
   // Example: 08048000-08056000 r-xp 00000000 03:0c 64593   /foo/bar
-  *start = ParseHex(&current_);
+  segment->start = ParseHex(&current_);
   CHECK_EQ(*current_++, '-');
-  *end = ParseHex(&current_);
+  segment->end = ParseHex(&current_);
   CHECK_EQ(*current_++, ' ');
   CHECK(IsOneOf(*current_, '-', 'r'));
-  *protection = 0;
-  if (*current_++ == 'r')
-    *protection |= kProtectionRead;
+  segment->protection = 0;
+  if (*current_++ == 'r') segment->protection |= kProtectionRead;
   CHECK(IsOneOf(*current_, '-', 'w'));
-  if (*current_++ == 'w')
-    *protection |= kProtectionWrite;
+  if (*current_++ == 'w') segment->protection |= kProtectionWrite;
   CHECK(IsOneOf(*current_, '-', 'x'));
-  if (*current_++ == 'x')
-    *protection |= kProtectionExecute;
+  if (*current_++ == 'x') segment->protection |= kProtectionExecute;
   CHECK(IsOneOf(*current_, 's', 'p'));
-  if (*current_++ == 's')
-    *protection |= kProtectionShared;
+  if (*current_++ == 's') segment->protection |= kProtectionShared;
   CHECK_EQ(*current_++, ' ');
-  *offset = ParseHex(&current_);
+  segment->offset = ParseHex(&current_);
   CHECK_EQ(*current_++, ' ');
   ParseHex(&current_);
   CHECK_EQ(*current_++, ':');
@@ -77,12 +64,11 @@ bool MemoryMappingLayout::Next(uptr *start, uptr *end, uptr *offset,
   // Fill in the filename.
   uptr i = 0;
   while (current_ < next_line) {
-    if (filename && i < filename_size - 1)
-      filename[i++] = *current_;
+    if (segment->filename && i < segment->filename_size - 1)
+      segment->filename[i++] = *current_;
     current_++;
   }
-  if (filename && i < filename_size)
-    filename[i] = 0;
+  if (segment->filename && i < segment->filename_size) segment->filename[i] = 0;
   current_ = next_line + 1;
   return true;
 }


@@ -96,40 +96,24 @@ void MemoryMappingLayout::LoadFromCache() {
 // segment.
 // Note that the segment addresses are not necessarily sorted.
 template <u32 kLCSegment, typename SegmentCommand>
-bool MemoryMappingLayout::NextSegmentLoad(uptr *start, uptr *end, uptr *offset,
-                                          char filename[], uptr filename_size,
-                                          ModuleArch *arch, u8 *uuid,
-                                          uptr *protection) {
+bool MemoryMappingLayout::NextSegmentLoad(MemoryMappedSegment *segment) {
   const char *lc = current_load_cmd_addr_;
   current_load_cmd_addr_ += ((const load_command *)lc)->cmdsize;
   if (((const load_command *)lc)->cmd == kLCSegment) {
     const SegmentCommand* sc = (const SegmentCommand *)lc;
-    GetSegmentAddrRange(start, end, sc->vmaddr, sc->vmsize);
-    if (protection) {
-      // Return the initial protection.
-      *protection = sc->initprot;
-    }
-    if (offset) {
-      if (current_filetype_ == /*MH_EXECUTE*/ 0x2) {
-        *offset = sc->vmaddr;
-      } else {
-        *offset = sc->fileoff;
-      }
-    }
-    if (filename) {
-      if (current_image_ == kDyldImageIdx) {
-        internal_strncpy(filename, kDyldPath, filename_size);
-      } else {
-        internal_strncpy(filename, _dyld_get_image_name(current_image_),
-                         filename_size);
-      }
-    }
-    if (arch) {
-      *arch = current_arch_;
-    }
-    if (uuid) {
-      internal_memcpy(uuid, current_uuid_, kModuleUUIDSize);
-    }
+    GetSegmentAddrRange(segment, sc->vmaddr, sc->vmsize);
+    // Return the initial protection.
+    segment->protection = sc->initprot;
+    segment->offset =
+        (current_filetype_ == /*MH_EXECUTE*/ 0x2) ? sc->vmaddr : sc->fileoff;
+    if (segment->filename) {
+      const char *src = (current_image_ == kDyldImageIdx)
+                            ? kDyldPath
+                            : _dyld_get_image_name(current_image_);
+      internal_strncpy(segment->filename, src, segment->filename_size);
+    }
+    segment->arch = current_arch_;
+    internal_memcpy(segment->uuid, current_uuid_, kModuleUUIDSize);
     return true;
   }
   return false;
@@ -215,8 +199,7 @@ static mach_header *get_dyld_image_header() {
                           (vm_region_info_t)&info, &count);
     if (err != KERN_SUCCESS) return nullptr;
 
-    if (size >= sizeof(mach_header) &&
-        info.protection & MemoryMappingLayout::kProtectionRead) {
+    if (size >= sizeof(mach_header) && info.protection & kProtectionRead) {
       mach_header *hdr = (mach_header *)address;
       if ((hdr->magic == MH_MAGIC || hdr->magic == MH_MAGIC_64) &&
           hdr->filetype == MH_DYLINKER) {
@@ -233,7 +216,7 @@ const mach_header *get_dyld_hdr() {
   return dyld_hdr;
 }
 
-void MemoryMappingLayout::GetSegmentAddrRange(uptr *start, uptr *end,
+void MemoryMappingLayout::GetSegmentAddrRange(MemoryMappedSegment *segment,
                                               uptr vmaddr, uptr vmsize) {
   if (current_image_ == kDyldImageIdx) {
     // vmaddr is masked with 0xfffff because on macOS versions < 10.12,
@@ -242,18 +225,16 @@ void MemoryMappingLayout::GetSegmentAddrRange(uptr *start, uptr *end,
     // isn't actually the absolute segment address, but the offset portion
     // of the address is accurate when combined with the dyld base address,
     // and the mask will give just this offset.
-    if (start) *start = (vmaddr & 0xfffff) + (uptr)get_dyld_hdr();
-    if (end) *end = (vmaddr & 0xfffff) + vmsize + (uptr)get_dyld_hdr();
+    segment->start = (vmaddr & 0xfffff) + (uptr)get_dyld_hdr();
+    segment->end = (vmaddr & 0xfffff) + vmsize + (uptr)get_dyld_hdr();
   } else {
     const sptr dlloff = _dyld_get_image_vmaddr_slide(current_image_);
-    if (start) *start = vmaddr + dlloff;
-    if (end) *end = vmaddr + vmsize + dlloff;
+    segment->start = vmaddr + dlloff;
+    segment->end = vmaddr + vmsize + dlloff;
   }
 }
 
-bool MemoryMappingLayout::Next(uptr *start, uptr *end, uptr *offset,
-                               char filename[], uptr filename_size,
-                               uptr *protection, ModuleArch *arch, u8 *uuid) {
+bool MemoryMappingLayout::Next(MemoryMappedSegment *segment) {
   for (; current_image_ >= kDyldImageIdx; current_image_--) {
     const mach_header *hdr = (current_image_ == kDyldImageIdx)
                                  ? get_dyld_hdr()
@@ -291,16 +272,13 @@ bool MemoryMappingLayout::Next(uptr *start, uptr *end, uptr *offset,
 #ifdef MH_MAGIC_64
       case MH_MAGIC_64: {
         if (NextSegmentLoad<LC_SEGMENT_64, struct segment_command_64>(
-                start, end, offset, filename, filename_size, arch, uuid,
-                protection))
+                segment))
           return true;
         break;
      }
 #endif
      case MH_MAGIC: {
-        if (NextSegmentLoad<LC_SEGMENT, struct segment_command>(
-                start, end, offset, filename, filename_size, arch, uuid,
-                protection))
+        if (NextSegmentLoad<LC_SEGMENT, struct segment_command>(segment))
          return true;
        break;
      }
@@ -315,28 +293,22 @@ bool MemoryMappingLayout::Next(uptr *start, uptr *end, uptr *offset,
 
 void MemoryMappingLayout::DumpListOfModules(
     InternalMmapVector<LoadedModule> *modules) {
   Reset();
-  uptr cur_beg, cur_end, prot;
-  ModuleArch cur_arch;
-  u8 cur_uuid[kModuleUUIDSize];
   InternalScopedString module_name(kMaxPathLength);
-  for (uptr i = 0; Next(&cur_beg, &cur_end, 0, module_name.data(),
-                        module_name.size(), &prot, &cur_arch, &cur_uuid[0]);
-       i++) {
-    const char *cur_name = module_name.data();
-    if (cur_name[0] == '\0')
-      continue;
+  MemoryMappedSegment segment(module_name.data(), kMaxPathLength);
+  for (uptr i = 0; Next(&segment); i++) {
+    if (segment.filename[0] == '\0') continue;
     LoadedModule *cur_module = nullptr;
     if (!modules->empty() &&
-        0 == internal_strcmp(cur_name, modules->back().full_name())) {
+        0 == internal_strcmp(segment.filename, modules->back().full_name())) {
       cur_module = &modules->back();
     } else {
       modules->push_back(LoadedModule());
       cur_module = &modules->back();
-      cur_module->set(cur_name, cur_beg, cur_arch, cur_uuid,
-                      current_instrumented_);
+      cur_module->set(segment.filename, segment.start, segment.arch,
+                      segment.uuid, current_instrumented_);
     }
-    cur_module->addAddressRange(cur_beg, cur_end, prot & kProtectionExecute,
-                                prot & kProtectionWrite);
+    cur_module->addAddressRange(segment.start, segment.end,
+                                segment.IsExecutable(), segment.IsWritable());
  }
 }


@@ -270,20 +270,19 @@ namespace __dsan {
 
 static void InitDataSeg() {
   MemoryMappingLayout proc_maps(true);
-  uptr start, end, offset;
   char name[128];
+  MemoryMappedSegment segment(name, ARRAY_SIZE(name));
   bool prev_is_data = false;
-  while (proc_maps.Next(&start, &end, &offset, name, ARRAY_SIZE(name),
-         /*protection*/ 0)) {
-    bool is_data = offset != 0 && name[0] != 0;
+  while (proc_maps.Next(&segment)) {
+    bool is_data = segment.offset != 0 && segment.filename[0] != 0;
     // BSS may get merged with [heap] in /proc/self/maps. This is not very
     // reliable.
-    bool is_bss = offset == 0 &&
-      (name[0] == 0 || internal_strcmp(name, "[heap]") == 0) && prev_is_data;
-    if (g_data_start == 0 && is_data)
-      g_data_start = start;
-    if (is_bss)
-      g_data_end = end;
+    bool is_bss = segment.offset == 0 &&
+                  (segment.filename[0] == 0 ||
+                   internal_strcmp(segment.filename, "[heap]") == 0) &&
+                  prev_is_data;
+    if (g_data_start == 0 && is_data) g_data_start = segment.start;
+    if (is_bss) g_data_end = segment.end;
    prev_is_data = is_data;
   }
   VPrintf(1, "guessed data_start=%p data_end=%p\n", g_data_start, g_data_end);


@@ -181,17 +181,15 @@ static void MapRodata() {
   }
   // Map the file into shadow of .rodata sections.
   MemoryMappingLayout proc_maps(/*cache_enabled*/true);
-  uptr start, end, offset, prot;
   // Reusing the buffer 'name'.
-  while (proc_maps.Next(&start, &end, &offset, name, ARRAY_SIZE(name), &prot)) {
-    if (name[0] != 0 && name[0] != '['
-        && (prot & MemoryMappingLayout::kProtectionRead)
-        && (prot & MemoryMappingLayout::kProtectionExecute)
-        && !(prot & MemoryMappingLayout::kProtectionWrite)
-        && IsAppMem(start)) {
+  MemoryMappedSegment segment(name, ARRAY_SIZE(name));
+  while (proc_maps.Next(&segment)) {
+    if (segment.filename[0] != 0 && segment.filename[0] != '[' &&
+        segment.IsReadable() && segment.IsExecutable() &&
+        !segment.IsWritable() && IsAppMem(segment.start)) {
       // Assume it's .rodata
-      char *shadow_start = (char*)MemToShadow(start);
-      char *shadow_end = (char*)MemToShadow(end);
+      char *shadow_start = (char *)MemToShadow(segment.start);
+      char *shadow_end = (char *)MemToShadow(segment.end);
       for (char *p = shadow_start; p < shadow_end; p += marker.size()) {
         internal_mmap(p, Min<uptr>(marker.size(), shadow_end - p),
                       PROT_READ, MAP_PRIVATE | MAP_FIXED, fd, 0);


@@ -118,18 +118,16 @@ static void ProtectRange(uptr beg, uptr end) {
 void CheckAndProtect() {
   // Ensure that the binary is indeed compiled with -pie.
   MemoryMappingLayout proc_maps(true);
-  uptr p, end, prot;
-  while (proc_maps.Next(&p, &end, 0, 0, 0, &prot)) {
-    if (IsAppMem(p))
+  MemoryMappedSegment segment;
+  while (proc_maps.Next(&segment)) {
+    if (IsAppMem(segment.start)) continue;
+    if (segment.start >= HeapMemEnd() && segment.start < HeapEnd()) continue;
+    if (segment.protection == 0)  // Zero page or mprotected.
       continue;
-    if (p >= HeapMemEnd() &&
-        p < HeapEnd())
-      continue;
-    if (prot == 0)  // Zero page or mprotected.
-      continue;
-    if (p >= VdsoBeg())  // vdso
+    if (segment.start >= VdsoBeg())  // vdso
       break;
-    Printf("FATAL: ThreadSanitizer: unexpected memory mapping %p-%p\n", p, end);
+    Printf("FATAL: ThreadSanitizer: unexpected memory mapping %p-%p\n",
+           segment.start, segment.end);
     Die();
}