mirror of
https://github.com/capstone-engine/llvm-capstone.git
synced 2025-03-01 14:58:18 +00:00
[sanitizer_common][fuchsia] Get correct vmar info
Forward fix for https://github.com/llvm/llvm-project/pull/75256. The process for MmapAlignedOrDieOnFatalError involves trimming the start and end of a mapping to ensure it is aligned correctly. This involves calling zx_vmar_map again while overwriting a part of the original mapping, which requires a call to zx_object_get_info(ZX_INFO_VMAR). After https://github.com/llvm/llvm-project/pull/75256, we unconditionally called this on gSanitizerHeapVmar, but this can lead to ZX_ERR_INVALID_ARGS if the prior mapping was made in the root vmar. This can be fixed by also returning the vmar into which the last mapping was made and using that vmar for follow-up operations that specifically involve the same mapping. This way we don't have to try each syscall for both vmars.
This commit is contained in:
parent
1df4fb9881
commit
a08402f95b
@ -160,7 +160,8 @@ static zx_status_t GetSanitizerHeapVmar(zx_handle_t *vmar) {
|
||||
|
||||
static zx_status_t TryVmoMapSanitizerVmar(zx_vm_option_t options,
|
||||
size_t vmar_offset, zx_handle_t vmo,
|
||||
size_t size, uintptr_t *addr) {
|
||||
size_t size, uintptr_t *addr,
|
||||
zx_handle_t *vmar_used = nullptr) {
|
||||
zx_handle_t vmar;
|
||||
zx_status_t status = GetSanitizerHeapVmar(&vmar);
|
||||
if (status != ZX_OK)
|
||||
@ -168,11 +169,15 @@ static zx_status_t TryVmoMapSanitizerVmar(zx_vm_option_t options,
|
||||
|
||||
status = _zx_vmar_map(gSanitizerHeapVmar, options, vmar_offset, vmo,
|
||||
/*vmo_offset=*/0, size, addr);
|
||||
if (status == ZX_ERR_NO_RESOURCES) {
|
||||
if (vmar_used)
|
||||
*vmar_used = gSanitizerHeapVmar;
|
||||
if (status == ZX_ERR_NO_RESOURCES || status == ZX_ERR_INVALID_ARGS) {
|
||||
// This means there's no space in the heap VMAR, so fallback to the root
|
||||
// VMAR.
|
||||
status = _zx_vmar_map(_zx_vmar_root_self(), options, vmar_offset, vmo,
|
||||
/*vmo_offset=*/0, size, addr);
|
||||
if (vmar_used)
|
||||
*vmar_used = _zx_vmar_root_self();
|
||||
}
|
||||
|
||||
return status;
|
||||
@ -367,8 +372,10 @@ void *MmapAlignedOrDieOnFatalError(uptr size, uptr alignment,
|
||||
// beginning of the VMO, and unmap the excess before and after.
|
||||
size_t map_size = size + alignment;
|
||||
uintptr_t addr;
|
||||
zx_handle_t vmar_used;
|
||||
status = TryVmoMapSanitizerVmar(ZX_VM_PERM_READ | ZX_VM_PERM_WRITE,
|
||||
/*vmar_offset=*/0, vmo, map_size, &addr);
|
||||
/*vmar_offset=*/0, vmo, map_size, &addr,
|
||||
&vmar_used);
|
||||
if (status == ZX_OK) {
|
||||
uintptr_t map_addr = addr;
|
||||
uintptr_t map_end = map_addr + map_size;
|
||||
@ -376,21 +383,22 @@ void *MmapAlignedOrDieOnFatalError(uptr size, uptr alignment,
|
||||
uintptr_t end = addr + size;
|
||||
if (addr != map_addr) {
|
||||
zx_info_vmar_t info;
|
||||
status = _zx_object_get_info(gSanitizerHeapVmar, ZX_INFO_VMAR, &info,
|
||||
sizeof(info), NULL, NULL);
|
||||
status = _zx_object_get_info(vmar_used, ZX_INFO_VMAR, &info, sizeof(info),
|
||||
NULL, NULL);
|
||||
if (status == ZX_OK) {
|
||||
uintptr_t new_addr;
|
||||
status = TryVmoMapSanitizerVmar(
|
||||
status = _zx_vmar_map(
|
||||
vmar_used,
|
||||
ZX_VM_PERM_READ | ZX_VM_PERM_WRITE | ZX_VM_SPECIFIC_OVERWRITE,
|
||||
addr - info.base, vmo, size, &new_addr);
|
||||
addr - info.base, vmo, 0, size, &new_addr);
|
||||
if (status == ZX_OK)
|
||||
CHECK_EQ(new_addr, addr);
|
||||
}
|
||||
}
|
||||
if (status == ZX_OK && addr != map_addr)
|
||||
status = _zx_vmar_unmap(_zx_vmar_root_self(), map_addr, addr - map_addr);
|
||||
status = _zx_vmar_unmap(vmar_used, map_addr, addr - map_addr);
|
||||
if (status == ZX_OK && end != map_end)
|
||||
status = _zx_vmar_unmap(_zx_vmar_root_self(), end, map_end - end);
|
||||
status = _zx_vmar_unmap(vmar_used, end, map_end - end);
|
||||
}
|
||||
_zx_handle_close(vmo);
|
||||
|
||||
|
Loading…
x
Reference in New Issue
Block a user