[NFC][sanitizer] Fix naming in StackStore

Vitaly Buka 2021-11-18 19:37:39 -08:00
parent 8210948a46
commit d591a46d17
3 changed files with 37 additions and 45 deletions

compiler-rt/lib/sanitizer_common/sanitizer_stack_store.cpp

@@ -5,10 +5,6 @@
 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
 //
 //===----------------------------------------------------------------------===//
-//
-// A fast memory allocator that does not support free() nor realloc().
-// All allocations are forever.
-//===----------------------------------------------------------------------===//

 #include "sanitizer_stack_store.h"
@@ -20,73 +16,73 @@ namespace __sanitizer {

 static constexpr u32 kStackSizeBits = 16;

-StackStore::Id StackStore::store(const StackTrace &trace) {
-  uptr *stack_trace = alloc(trace.size + 1);
+StackStore::Id StackStore::Store(const StackTrace &trace) {
+  uptr *stack_trace = Alloc(trace.size + 1);
   CHECK_LT(trace.size, 1 << kStackSizeBits);
   *stack_trace = trace.size + (trace.tag << kStackSizeBits);
   internal_memcpy(stack_trace + 1, trace.trace, trace.size * sizeof(uptr));
   return reinterpret_cast<StackStore::Id>(stack_trace);
 }

-StackTrace StackStore::load(Id id) {
+StackTrace StackStore::Load(Id id) {
   const uptr *stack_trace = reinterpret_cast<const uptr *>(id);
   uptr size = *stack_trace & ((1 << kStackSizeBits) - 1);
   uptr tag = *stack_trace >> kStackSizeBits;
   return StackTrace(stack_trace + 1, size, tag);
 }

-uptr *StackStore::tryAlloc(uptr count) {
+uptr *StackStore::TryAlloc(uptr count) {
   // Optimistic lock-free allocation: essentially, try to bump the region ptr.
   for (;;) {
-    uptr cmp = atomic_load(&region_pos, memory_order_acquire);
-    uptr end = atomic_load(&region_end, memory_order_acquire);
+    uptr cmp = atomic_load(&region_pos_, memory_order_acquire);
+    uptr end = atomic_load(&region_end_, memory_order_acquire);
     uptr size = count * sizeof(uptr);
     if (cmp == 0 || cmp + size > end)
       return nullptr;
-    if (atomic_compare_exchange_weak(&region_pos, &cmp, cmp + size,
+    if (atomic_compare_exchange_weak(&region_pos_, &cmp, cmp + size,
                                      memory_order_acquire))
       return reinterpret_cast<uptr *>(cmp);
   }
 }

-uptr *StackStore::alloc(uptr count) {
+uptr *StackStore::Alloc(uptr count) {
   // First, try to allocate optimistically.
-  uptr *s = tryAlloc(count);
+  uptr *s = TryAlloc(count);
   if (LIKELY(s))
     return s;
-  return refillAndAlloc(count);
+  return RefillAndAlloc(count);
 }

-uptr *StackStore::refillAndAlloc(uptr count) {
+uptr *StackStore::RefillAndAlloc(uptr count) {
   // If that failed, lock, retry, and allocate a new superblock.
-  SpinMutexLock l(&mtx);
+  SpinMutexLock l(&mtx_);
   for (;;) {
-    uptr *s = tryAlloc(count);
+    uptr *s = TryAlloc(count);
     if (s)
       return s;
-    atomic_store(&region_pos, 0, memory_order_relaxed);
+    atomic_store(&region_pos_, 0, memory_order_relaxed);
     uptr size = count * sizeof(uptr) + sizeof(BlockInfo);
     uptr allocsz = RoundUpTo(Max<uptr>(size, 64u * 1024u), GetPageSizeCached());
     uptr mem = (uptr)MmapOrDie(allocsz, "stack depot");
     BlockInfo *new_block = (BlockInfo *)(mem + allocsz) - 1;
-    new_block->next = curr;
+    new_block->next = curr_;
     new_block->ptr = mem;
     new_block->size = allocsz;
-    curr = new_block;
+    curr_ = new_block;

-    atomic_fetch_add(&mapped_size, allocsz, memory_order_relaxed);
+    atomic_fetch_add(&mapped_size_, allocsz, memory_order_relaxed);
     allocsz -= sizeof(BlockInfo);
-    atomic_store(&region_end, mem + allocsz, memory_order_release);
-    atomic_store(&region_pos, mem, memory_order_release);
+    atomic_store(&region_end_, mem + allocsz, memory_order_release);
+    atomic_store(&region_pos_, mem, memory_order_release);
   }
 }

 void StackStore::TestOnlyUnmap() {
-  while (curr) {
-    uptr mem = curr->ptr;
-    uptr allocsz = curr->size;
-    curr = curr->next;
+  while (curr_) {
+    uptr mem = curr_->ptr;
+    uptr allocsz = curr_->size;
+    curr_ = curr_->next;
     UnmapOrDie((void *)mem, allocsz);
   }
   internal_memset(this, 0, sizeof(*this));
 }
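Editor's note: the fast path renamed to TryAlloc() above is an optimistic lock-free bump-pointer allocator over the current superblock. Below is a minimal standalone sketch of the same technique, with std::atomic standing in for the sanitizer's atomic wrappers; all names here are illustrative, not part of the sanitizer API.

#include <atomic>
#include <cstdint>
#include <cstdio>

using uptr = uintptr_t;

static std::atomic<uptr> region_pos{0};  // Next free byte; 0 means no region.
static std::atomic<uptr> region_end{0};  // One past the last usable byte.

// Optimistic bump allocation: read the cursor, bounds-check, and CAS it
// forward. On contention the failed CAS refreshes cmp and the loop retries.
uptr *TryAlloc(uptr count) {
  for (;;) {
    uptr cmp = region_pos.load(std::memory_order_acquire);
    uptr end = region_end.load(std::memory_order_acquire);
    uptr size = count * sizeof(uptr);
    if (cmp == 0 || cmp + size > end)
      return nullptr;  // No region yet, or not enough room: caller must refill.
    if (region_pos.compare_exchange_weak(cmp, cmp + size,
                                         std::memory_order_acquire))
      return reinterpret_cast<uptr *>(cmp);
  }
}

int main() {
  alignas(uptr) static char region[4096];  // Stand-in for an mmapped superblock.
  region_pos.store((uptr)region, std::memory_order_release);
  region_end.store((uptr)region + sizeof(region), std::memory_order_release);
  uptr *p = TryAlloc(4);
  std::printf("allocated 4 words at %p\n", (void *)p);
}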

compiler-rt/lib/sanitizer_common/sanitizer_stack_store.h

@@ -5,10 +5,6 @@
 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
 //
 //===----------------------------------------------------------------------===//
-//
-// A fast memory allocator that does not support free() nor realloc().
-// All allocations are forever.
-//===----------------------------------------------------------------------===//

 #ifndef SANITIZER_STACK_STORE_H
 #define SANITIZER_STACK_STORE_H
@@ -26,27 +22,27 @@ class StackStore {
   using Id = uptr;

-  Id store(const StackTrace &trace);
-  StackTrace load(Id id);
-  uptr allocated() const { return atomic_load_relaxed(&mapped_size); }
+  Id Store(const StackTrace &trace);
+  StackTrace Load(Id id);
+  uptr Allocated() const { return atomic_load_relaxed(&mapped_size_); }

   void TestOnlyUnmap();

  private:
-  uptr *alloc(uptr count = 1);
-  uptr *tryAlloc(uptr count);
-  uptr *refillAndAlloc(uptr count);
-  mutable StaticSpinMutex mtx = {};  // Protects alloc of new blocks.
-  atomic_uintptr_t region_pos = {};  // Region allocator for Nodes.
-  atomic_uintptr_t region_end = {};
-  atomic_uintptr_t mapped_size = {};
+  uptr *Alloc(uptr count = 1);
+  uptr *TryAlloc(uptr count);
+  uptr *RefillAndAlloc(uptr count);
+  mutable StaticSpinMutex mtx_ = {};  // Protects alloc of new blocks.
+  atomic_uintptr_t region_pos_ = {};  // Region allocator for Nodes.
+  atomic_uintptr_t region_end_ = {};
+  atomic_uintptr_t mapped_size_ = {};

   struct BlockInfo {
     const BlockInfo *next;
     uptr ptr;
     uptr size;
   };
-  const BlockInfo *curr = nullptr;
+  const BlockInfo *curr_ = nullptr;
 };

 }  // namespace __sanitizer
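Editor's note: the BlockInfo struct and curr_ declared here implement the superblock bookkeeping behind RefillAndAlloc(): each mapped block keeps its own BlockInfo record in its last bytes, linked into an intrusive list that TestOnlyUnmap() walks. A sketch of that trailer pattern, with malloc/free standing in for MmapOrDie/UnmapOrDie and all names illustrative:

#include <cstdint>
#include <cstdlib>

using uptr = uintptr_t;

struct BlockInfo {
  const BlockInfo *next;
  uptr ptr;   // Start of the block.
  uptr size;  // Total block size, including this trailer.
};

static const BlockInfo *curr = nullptr;

// Carve the bookkeeping record out of the tail of a fresh block and push it
// onto the list, as RefillAndAlloc() does with each new superblock.
uptr AddBlock(uptr allocsz) {
  uptr mem = (uptr)std::malloc(allocsz);  // MmapOrDie() in the real code.
  BlockInfo *new_block = (BlockInfo *)(mem + allocsz) - 1;
  new_block->next = curr;
  new_block->ptr = mem;
  new_block->size = allocsz;
  curr = new_block;
  return mem;  // Usable range: [mem, mem + allocsz - sizeof(BlockInfo)).
}

// Walk the list and release every block, as TestOnlyUnmap() does.
void FreeAllBlocks() {
  while (curr) {
    uptr mem = curr->ptr;
    curr = curr->next;
    std::free((void *)mem);  // UnmapOrDie() in the real code.
  }
}

int main() {
  AddBlock(64 * 1024);
  AddBlock(64 * 1024);
  FreeAllBlocks();
}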

compiler-rt/lib/sanitizer_common/sanitizer_stackdepot.cpp

@@ -73,20 +73,20 @@ void StackDepotHandle::inc_use_count_unsafe() {
 }

 uptr StackDepotNode::allocated() {
-  return stackStore.allocated() + storeIds.MemoryUsage() +
+  return stackStore.Allocated() + storeIds.MemoryUsage() +
          useCounts.MemoryUsage();
 }

 void StackDepotNode::store(u32 id, const args_type &args, hash_type hash) {
   stack_hash = hash;
-  storeIds[id] = stackStore.store(args);
+  storeIds[id] = stackStore.Store(args);
 }

 StackDepotNode::args_type StackDepotNode::load(u32 id) const {
   StackStore::Id store_id = storeIds[id];
   if (!store_id)
     return {};
-  return stackStore.load(store_id);
+  return stackStore.Load(store_id);
 }

 StackDepotStats StackDepotGetStats() { return theDepot.GetStats(); }
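Editor's note: the Store()/Load() pair the depot delegates to packs the trace size into the low kStackSizeBits of a header word and the tag into the bits above it, followed by the raw frames. A self-contained sketch of that round trip, with MiniTrace standing in for __sanitizer::StackTrace and new[] for the forever region allocator; names are illustrative:

#include <cassert>
#include <cstdint>
#include <cstring>

using uptr = uintptr_t;
using Id = uptr;
constexpr uint32_t kStackSizeBits = 16;

struct MiniTrace {  // Stand-in for StackTrace; only the fields used here.
  const uptr *trace;
  uptr size;
  uptr tag;
};

Id Store(const MiniTrace &t) {
  assert(t.size < (1u << kStackSizeBits));
  uptr *p = new uptr[t.size + 1];  // Header word plus the frames.
  p[0] = t.size + (t.tag << kStackSizeBits);
  std::memcpy(p + 1, t.trace, t.size * sizeof(uptr));
  return reinterpret_cast<Id>(p);  // The Id is just the pointer.
}

MiniTrace Load(Id id) {
  const uptr *p = reinterpret_cast<const uptr *>(id);
  return {p + 1, p[0] & ((1 << kStackSizeBits) - 1), p[0] >> kStackSizeBits};
}

int main() {
  uptr frames[] = {0x1000, 0x1004, 0x1008};
  Id id = Store({frames, 3, 7});
  MiniTrace t = Load(id);
  assert(t.size == 3 && t.tag == 7 && t.trace[1] == 0x1004);
}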