Bug 1163168 - Prettify IonAssemblerBuffer.h. r=dougc

Sean Stangl 2015-05-08 11:55:34 -07:00
parent 983fcabeef
commit 96e9e2211a
2 changed files with 312 additions and 219 deletions

jit/shared/IonAssemblerBuffer.h

@@ -7,26 +7,39 @@
#ifndef jit_shared_IonAssemblerBuffer_h
#define jit_shared_IonAssemblerBuffer_h
// needed for the definition of Label :(
#include "mozilla/Assertions.h"
#include "jit/shared/Assembler-shared.h"
namespace js {
namespace jit {
// This should theoretically reside inside of AssemblerBuffer, but that won't be
// nice: AssemblerBuffer is templated, so BufferOffset would be indirectly
// templated as well.
//
// A BufferOffset is the offset into a buffer, expressed in bytes of
// instructions.
// The offset into a buffer, in bytes.
class BufferOffset
{
int offset;
public:
friend BufferOffset nextOffset();
explicit BufferOffset(int offset_) : offset(offset_) {}
// Return the offset as a raw integer.
BufferOffset()
: offset(INT_MIN)
{ }
explicit BufferOffset(int offset_)
: offset(offset_)
{ }
explicit BufferOffset(Label* l)
: offset(l->offset())
{ }
explicit BufferOffset(RepatchLabel* l)
: offset(l->offset())
{ }
int getOffset() const { return offset; }
bool assigned() const { return offset != INT_MIN; }
// A BOffImm is a Branch Offset Immediate. It is an architecture-specific
// structure that holds the immediate for a pc relative branch. diffB takes
@@ -43,26 +56,31 @@ class BufferOffset
MOZ_ASSERT(other->bound());
return BOffImm(offset - other->offset());
}
explicit BufferOffset(Label* l) : offset(l->offset()) {
}
explicit BufferOffset(RepatchLabel* l) : offset(l->offset()) {
}
BufferOffset() : offset(INT_MIN) {}
bool assigned() const { return offset != INT_MIN; }
};
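As a quick illustration (not part of the patch; the values are invented), the
INT_MIN sentinel is what separates a default-constructed BufferOffset from an
assigned one:

void example() {
    BufferOffset invalid;              // default-constructed: offset == INT_MIN
    BufferOffset bound(128);           // an assigned offset, 128 bytes in
    MOZ_ASSERT(!invalid.assigned());
    MOZ_ASSERT(bound.assigned());
    MOZ_ASSERT(bound.getOffset() == 128);
}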
template<int SliceSize>
struct BufferSlice {
class BufferSlice
{
protected:
BufferSlice<SliceSize>* prev_;
BufferSlice<SliceSize>* next_;
// How much data has been added to the current node.
uint32_t nodeSize_;
size_t bytelength_;
public:
mozilla::Array<uint8_t, SliceSize> instructions;
public:
explicit BufferSlice()
: prev_(nullptr), next_(nullptr), bytelength_(0)
{ }
size_t length() const { return bytelength_; }
static inline size_t Capacity() { return SliceSize; }
BufferSlice* getNext() const { return next_; }
BufferSlice* getPrev() const { return prev_; }
void setNext(BufferSlice<SliceSize>* next) {
MOZ_ASSERT(next_ == nullptr);
MOZ_ASSERT(next->prev_ == nullptr);
@@ -70,223 +88,265 @@ struct BufferSlice {
next->prev_ = this;
}
mozilla::Array<uint8_t, SliceSize> instructions;
size_t size() const {
return nodeSize_;
}
explicit BufferSlice() : prev_(nullptr), next_(nullptr), nodeSize_(0) {}
void putBlob(uint32_t instSize, uint8_t* inst) {
if (inst != nullptr)
memcpy(&instructions[size()], inst, instSize);
nodeSize_ += instSize;
void putBytes(size_t numBytes, const uint8_t* source) {
MOZ_ASSERT(bytelength_ + numBytes <= SliceSize);
if (source)
memcpy(&instructions[length()], source, numBytes);
bytelength_ += numBytes;
}
};
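A minimal sketch of how slices chain (illustrative only: the SliceSize of 16 is
invented, and real slices are LifoAlloc-allocated by the buffer below rather
than stack-allocated):

void example() {
    BufferSlice<16> a, b;
    uint8_t data[12] = {};
    a.putBytes(12, data);    // a.length() == 12, so 4 bytes of capacity remain
    a.setNext(&b);           // link: a.getNext() == &b, b.getPrev() == &a
    b.putBytes(12, data);    // bytes that do not fit go in the next slice
}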
template<int SliceSize, class Inst>
struct AssemblerBuffer
class AssemblerBuffer
{
public:
explicit AssemblerBuffer() : head(nullptr), tail(nullptr), m_oom(false),
m_bail(false), bufferSize(0), lifoAlloc_(8192) {}
protected:
typedef BufferSlice<SliceSize> Slice;
typedef AssemblerBuffer<SliceSize, Inst> AssemblerBuffer_;
protected:
// Doubly-linked list of BufferSlices, with the most recent in tail position.
Slice* head;
Slice* tail;
public:
bool m_oom;
bool m_bail;
// How much data has been added to the buffer thus far.
// How many bytes have been committed to the buffer thus far.
// Does not include the tail.
uint32_t bufferSize;
uint32_t lastInstSize;
// Finger for speeding up accesses.
Slice* finger;
int finger_offset;
LifoAlloc lifoAlloc_;
public:
explicit AssemblerBuffer()
: head(nullptr),
tail(nullptr),
m_oom(false),
m_bail(false),
bufferSize(0),
lastInstSize(0),
finger(nullptr),
finger_offset(0),
lifoAlloc_(8192)
{ }
public:
bool isAligned(int alignment) const {
// Make sure the requested alignment is a power of two.
MOZ_ASSERT(IsPowerOfTwo(alignment));
return !(size() & (alignment - 1));
}
virtual Slice* newSlice(LifoAlloc& a) {
Slice* tmp = static_cast<Slice*>(a.alloc(sizeof(Slice)));
if (!tmp) {
m_oom = true;
fail_oom();
return nullptr;
}
new (tmp) Slice;
return tmp;
return new (tmp) Slice;
}
bool ensureSpace(int size) {
if (tail != nullptr && tail->size() + size <= SliceSize)
// Space can exist in the most recent Slice.
if (tail && tail->length() + size <= tail->Capacity())
return true;
Slice* tmp = newSlice(lifoAlloc_);
if (tmp == nullptr)
return false;
if (tail != nullptr) {
bufferSize += tail->size();
tail->setNext(tmp);
}
tail = tmp;
if (head == nullptr) {
finger = tmp;
// Otherwise, a new Slice must be added.
Slice* slice = newSlice(lifoAlloc_);
if (slice == nullptr)
return fail_oom();
// If this is the first Slice in the buffer, add to head position.
if (!head) {
head = slice;
finger = slice;
finger_offset = 0;
head = tmp;
}
// Finish the last Slice and add the new Slice to the linked list.
if (tail) {
bufferSize += tail->length();
tail->setNext(slice);
}
tail = slice;
return true;
}
BufferOffset putByte(uint8_t value) {
return putBlob(sizeof(value), (uint8_t*)&value);
return putBytes(sizeof(value), (uint8_t*)&value);
}
BufferOffset putShort(uint16_t value) {
return putBlob(sizeof(value), (uint8_t*)&value);
return putBytes(sizeof(value), (uint8_t*)&value);
}
BufferOffset putInt(uint32_t value) {
return putBlob(sizeof(value), (uint8_t*)&value);
return putBytes(sizeof(value), (uint8_t*)&value);
}
BufferOffset putBlob(uint32_t instSize, uint8_t* inst) {
BufferOffset putBytes(uint32_t instSize, uint8_t* inst) {
if (!ensureSpace(instSize))
return BufferOffset();
BufferOffset ret = nextOffset();
tail->putBlob(instSize, inst);
tail->putBytes(instSize, inst);
return ret;
}
unsigned int size() const {
int executableSize;
if (tail != nullptr)
executableSize = bufferSize + tail->size();
else
executableSize = bufferSize;
return executableSize;
if (tail)
return bufferSize + tail->length();
return bufferSize;
}
bool oom() const {
return m_oom || m_bail;
}
bool bail() const {
return m_bail;
}
void fail_oom() {
bool oom() const { return m_oom || m_bail; }
bool bail() const { return m_bail; }
bool fail_oom() {
m_oom = true;
return false;
}
void fail_bail() {
bool fail_bail() {
m_bail = true;
return false;
}
// Finger for speeding up accesses.
Slice* finger;
unsigned int finger_offset;
void update_finger(Slice* finger_, int fingerOffset_) {
finger = finger_;
finger_offset = fingerOffset_;
}
private:
static const unsigned SliceDistanceRequiringFingerUpdate = 3;
Inst* getInstForwards(BufferOffset off, Slice* start, int startOffset, bool updateFinger = false) {
const int offset = off.getOffset();
int cursor = startOffset;
unsigned slicesSkipped = 0;
MOZ_ASSERT(offset >= cursor);
for (Slice *slice = start; slice != nullptr; slice = slice->getNext()) {
const int slicelen = slice->length();
// Is the offset within the bounds of this slice?
if (offset < cursor + slicelen) {
if (updateFinger || slicesSkipped >= SliceDistanceRequiringFingerUpdate)
update_finger(slice, cursor);
MOZ_ASSERT(offset - cursor < (int)slice->length());
return (Inst*)&slice->instructions[offset - cursor];
}
cursor += slicelen;
slicesSkipped++;
}
MOZ_MAKE_COMPILER_ASSUME_IS_UNREACHABLE("Invalid instruction cursor.");
}
Inst* getInstBackwards(BufferOffset off, Slice* start, int startOffset, bool updateFinger = false) {
const int offset = off.getOffset();
int cursor = startOffset; // First (lowest) offset in the start Slice.
unsigned slicesSkipped = 0;
MOZ_ASSERT(offset < int(cursor + start->length()));
for (Slice* slice = start; slice != nullptr; ) {
// Is the offset within the bounds of this slice?
if (offset >= cursor) {
if (updateFinger || slicesSkipped >= SliceDistanceRequiringFingerUpdate)
update_finger(slice, cursor);
MOZ_ASSERT(offset - cursor < (int)slice->length());
return (Inst*)&slice->instructions[offset - cursor];
}
// Move the cursor to the start of the previous slice.
Slice* prev = slice->getPrev();
cursor -= prev->length();
slice = prev;
slicesSkipped++;
}
MOZ_MAKE_COMPILER_ASSUME_IS_UNREACHABLE("Invalid instruction cursor.");
}
public:
Inst* getInst(BufferOffset off) {
int local_off = off.getOffset();
// Don't update the structure's finger in place, so there is the option
// to not update it.
Slice* cur = nullptr;
int cur_off;
// Get the offset that we'd be dealing with by walking through
// backwards.
int end_off = bufferSize - local_off;
// If end_off is negative, then it is in the last chunk, and there is no
// real work to be done.
if (end_off <= 0)
return (Inst*)&tail->instructions[-end_off];
bool used_finger = false;
int finger_off = abs((int)(local_off - finger_offset));
if (finger_off < Min(local_off, end_off)) {
// The finger offset is minimal, use the finger.
cur = finger;
cur_off = finger_offset;
used_finger = true;
} else if (local_off < end_off) {
// It is closest to the start.
cur = head;
cur_off = 0;
} else {
// It is closest to the end.
cur = tail;
cur_off = bufferSize;
const int offset = off.getOffset();
// Is the instruction in the last slice?
if (offset >= int(bufferSize))
return (Inst*)&tail->instructions[offset - bufferSize];
// How close is this offset to the previous one we looked up?
// If it is sufficiently far from the start and end of the buffer,
// use the finger to start midway through the list.
int finger_dist = abs(offset - finger_offset);
if (finger_dist < Min(offset, int(bufferSize - offset))) {
if (finger_offset < offset)
return getInstForwards(off, finger, finger_offset, true);
return getInstBackwards(off, finger, finger_offset, true);
}
int count = 0;
if (local_off < cur_off) {
for (; cur != nullptr; cur = cur->getPrev(), cur_off -= cur->size()) {
if (local_off >= cur_off) {
local_off -= cur_off;
break;
}
count++;
}
MOZ_ASSERT(cur != nullptr);
} else {
for (; cur != nullptr; cur = cur->getNext()) {
int cur_size = cur->size();
if (local_off < cur_off + cur_size) {
local_off -= cur_off;
break;
}
cur_off += cur_size;
count++;
}
MOZ_ASSERT(cur != nullptr);
}
if (count > 2 || used_finger) {
finger = cur;
finger_offset = cur_off;
}
// The offset within this node should not be larger than the node
// itself.
MOZ_ASSERT(local_off < (int)cur->size());
return (Inst*)&cur->instructions[local_off];
// Is the instruction closer to the start or to the end?
if (offset < int(bufferSize - offset))
return getInstForwards(off, head, 0);
// The last slice was already checked above, so start at the second-to-last.
Slice* prev = tail->getPrev();
return getInstBackwards(off, prev, bufferSize - prev->length());
}
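To make the heuristic concrete (numbers invented): with bufferSize = 40000 and
the finger left at offset 20000, a lookup of offset 20100 has finger_dist = 100,
smaller than both the distance from the start (20100) and from the end (19900),
so the walk starts forwards from the finger. A lookup of offset 300 instead
walks forwards from head, and one of offset 39000 walks backwards from the
second-to-last slice.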
BufferOffset nextOffset() const {
if (tail != nullptr)
return BufferOffset(bufferSize + tail->size());
else
return BufferOffset(bufferSize);
}
BufferOffset prevOffset() const {
MOZ_CRASH("Don't currently record lastInstSize");
if (tail)
return BufferOffset(bufferSize + tail->length());
return BufferOffset(bufferSize);
}
// Break the instruction stream so we can go back and edit it at this point.
void perforate() {
Slice* tmp = newSlice(lifoAlloc_);
if (!tmp) {
m_oom = true;
Slice* slice = newSlice(lifoAlloc_);
if (!slice) {
fail_oom();
return;
}
bufferSize += tail->size();
tail->setNext(tmp);
tail = tmp;
bufferSize += tail->length();
tail->setNext(slice);
tail = slice;
}
void executableCopy(uint8_t* dest_) {
if (this->oom())
return;
for (Slice* cur = head; cur != nullptr; cur = cur->getNext()) {
memcpy(dest_, &cur->instructions, cur->size());
dest_ += cur->size();
}
}
class AssemblerBufferInstIterator {
private:
class AssemblerBufferInstIterator
{
BufferOffset bo;
AssemblerBuffer_* m_buffer;
public:
explicit AssemblerBufferInstIterator(BufferOffset off, AssemblerBuffer_* buff)
: bo(off), m_buffer(buff)
{
}
explicit AssemblerBufferInstIterator(BufferOffset off, AssemblerBuffer_* buffer)
: bo(off), m_buffer(buffer)
{ }
Inst* next() {
Inst* i = m_buffer->getInst(bo);
bo = BufferOffset(bo.getOffset() + i->size());
return cur();
}
Inst* cur() {
return m_buffer->getInst(bo);
}
};
public:
LifoAlloc lifoAlloc_;
};
} // ion
} // js
#endif /* jit_shared_IonAssemblerBuffer_h */
} // namespace jit
} // namespace js
#endif // jit_shared_IonAssemblerBuffer_h
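For context, a sketch of how the finished class is driven (not from the patch;
the 4-byte Inst stub and the instruction words are invented):

struct Inst {
    uint32_t raw;
    size_t size() const { return sizeof(uint32_t); }
};

void example() {
    AssemblerBuffer<1024, Inst> buf;
    BufferOffset first = buf.putInt(0xd503201f);  // offset of the first word
    buf.putInt(0xd65f03c0);
    if (!buf.oom()) {
        MOZ_ASSERT(buf.size() == 8);              // two 4-byte writes committed
        Inst* inst = buf.getInst(first);          // random access via the finger
        MOZ_ASSERT(inst->raw == 0xd503201f);
    }
}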

jit/shared/IonAssemblerBufferWithConstantPools.h

@@ -115,8 +115,7 @@ typedef Vector<BufferOffset, 512, OldJitAllocPolicy> LoadOffsets;
// The allocation unit size for pools.
typedef int32_t PoolAllocUnit;
struct Pool
: public OldJitAllocPolicy
struct Pool : public OldJitAllocPolicy
{
private:
// The maximum program-counter relative offset below which the instruction
@@ -155,15 +154,20 @@ struct Pool
LoadOffsets loadOffsets;
explicit Pool(size_t maxOffset, unsigned bias, LifoAlloc& lifoAlloc)
: maxOffset_(maxOffset), bias_(bias), numEntries_(0), buffSize(8),
poolData_(lifoAlloc.newArrayUninitialized<PoolAllocUnit>(buffSize)),
limitingUser(), limitingUsee(INT_MIN), loadOffsets()
{
}
: maxOffset_(maxOffset),
bias_(bias),
numEntries_(0),
buffSize(8),
poolData_(lifoAlloc.newArrayUninitialized<PoolAllocUnit>(buffSize)),
limitingUser(),
limitingUsee(INT_MIN),
loadOffsets()
{ }
static const unsigned Garbage = 0xa5a5a5a5;
Pool() : maxOffset_(Garbage), bias_(Garbage)
{
}
Pool()
: maxOffset_(Garbage), bias_(Garbage)
{ }
PoolAllocUnit* poolData() const {
return poolData_;
@@ -241,68 +245,87 @@ struct Pool
limitingUsee = -1;
return true;
}
};
template <size_t SliceSize, size_t InstSize>
struct BufferSliceTail : public BufferSlice<SliceSize> {
struct BufferSliceTail : public BufferSlice<SliceSize>
{
private:
// Bit vector to record which instructions in the slice have a branch, so
// that they can be patched when the final positions are known.
mozilla::Array<uint8_t, (SliceSize / InstSize) / 8> isBranch_;
public:
Pool* pool;
// Flag when the last instruction in the slice is a 'natural' pool guard. A
// natural pool guard is a branch in the code that was not explicitly added
// to branch around the pool. For now an explicit guard branch is always
// emitted, so this will always be false.
bool isNatural : 1;
BufferSliceTail* getNext() const {
return (BufferSliceTail*)this->next_;
}
explicit BufferSliceTail() : pool(nullptr), isNatural(true) {
public:
explicit BufferSliceTail()
: pool(nullptr), isNatural(true)
{
static_assert(SliceSize % (8 * InstSize) == 0, "SliceSize must be a multiple of 8 * InstSize.");
mozilla::PodArrayZero(isBranch_);
}
void markNextAsBranch() {
// The caller is expected to ensure that nodeSize_ < SliceSize. See the
// assembler's markNextAsBranch() method, which first creates a new slice
// if necessary.
MOZ_ASSERT(this->nodeSize_ % InstSize == 0);
MOZ_ASSERT(this->nodeSize_ < SliceSize);
size_t idx = this->nodeSize_ / InstSize;
isBranch_[idx >> 3] |= 1 << (idx & 0x7);
}
public:
bool isBranch(unsigned idx) const {
MOZ_ASSERT(idx < this->nodeSize_ / InstSize);
MOZ_ASSERT(idx < this->bytelength_ / InstSize);
return (isBranch_[idx >> 3] >> (idx & 0x7)) & 1;
}
bool isNextBranch() const {
size_t size = this->nodeSize_;
size_t size = this->bytelength_;
MOZ_ASSERT(size < SliceSize);
return isBranch(size / InstSize);
}
void markNextAsBranch() {
// The caller is expected to ensure that bytelength_ < SliceSize. See the
// assembler's markNextAsBranch() method, which first creates a new slice
// if necessary.
MOZ_ASSERT(this->bytelength_ % InstSize == 0);
MOZ_ASSERT(this->bytelength_ < SliceSize);
size_t idx = this->bytelength_ / InstSize;
isBranch_[idx >> 3] |= 1 << (idx & 0x7);
}
BufferSliceTail* getNext() const {
return (BufferSliceTail*)this->next_;
}
};
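Worked through once (values invented): with InstSize = 4, marking the
instruction that starts at byte 88 gives idx = 88 / 4 = 22, so the mark lands in
byte idx >> 3 = 2 at bit position idx & 0x7 = 6, i.e. isBranch_[2] |= 1 << 6;
isBranch(22) later reads that same bit back.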
// InstSize is sizeof(Inst), but is needed here because the buffer is
// defined before the Instruction.
template <size_t SliceSize, size_t InstSize, class Inst, class Asm>
struct AssemblerBufferWithConstantPools : public AssemblerBuffer<SliceSize, Inst> {
struct AssemblerBufferWithConstantPools : public AssemblerBuffer<SliceSize, Inst>
{
private:
// The PoolEntry index counter. Each PoolEntry is given a unique index,
// counting up from zero, and these can be mapped back to the actual pool
// entry offset after finishing the buffer, see poolEntryOffset().
size_t poolEntryCount;
public:
class PoolEntry {
class PoolEntry
{
size_t index_;
public:
explicit PoolEntry(size_t index) : index_(index) {
}
PoolEntry() : index_(-1) {
}
explicit PoolEntry(size_t index)
: index_(index)
{ }
PoolEntry()
: index_(-1)
{ }
size_t index() const {
return index_;
}
@@ -384,14 +407,12 @@ struct AssemblerBufferWithConstantPools : public AssemblerBuffer<SliceSize, Inst
bool inhibitNops_;
public:
// A unique id within each JitContext, to identify pools in the debug
// spew. Set by the MacroAssembler, see getNextAssemblerId().
int id;
private:
// The buffer slices are in a doubly linked list. Pointers to the head and
// tail of this list:
// The buffer slices are in a doubly linked list.
BufferSlice* getHead() const {
return (BufferSlice*)this->head;
}
@@ -413,15 +434,27 @@ struct AssemblerBufferWithConstantPools : public AssemblerBuffer<SliceSize, Inst
size_t instBufferAlign, size_t poolMaxOffset,
unsigned pcBias, uint32_t alignFillInst, uint32_t nopFillInst,
unsigned nopFill = 0)
: poolEntryCount(0), guardSize_(guardSize), headerSize_(headerSize),
poolMaxOffset_(poolMaxOffset), pcBias_(pcBias),
instBufferAlign_(instBufferAlign),
numDumps_(0), poolInfoSize_(8), poolInfo_(nullptr),
canNotPlacePool_(false), alignFillInst_(alignFillInst),
nopFillInst_(nopFillInst), nopFill_(nopFill), inhibitNops_(false),
id(-1)
{
}
: poolEntryCount(0),
guardSize_(guardSize),
headerSize_(headerSize),
poolMaxOffset_(poolMaxOffset),
pcBias_(pcBias),
pool_(),
instBufferAlign_(instBufferAlign),
numDumps_(0),
poolInfoSize_(8),
poolInfo_(nullptr),
canNotPlacePool_(false),
#ifdef DEBUG
canNotPlacePoolStartOffset_(0),
canNotPlacePoolMaxInst_(0),
#endif
alignFillInst_(alignFillInst),
nopFillInst_(nopFillInst),
nopFill_(nopFill),
inhibitNops_(false),
id(-1)
{ }
// We need to wait until an AutoJitContextAlloc is created by the
// MacroAssembler before allocating any space.
@@ -526,7 +559,8 @@ struct AssemblerBufferWithConstantPools : public AssemblerBuffer<SliceSize, Inst
public:
BufferOffset allocEntry(size_t numInst, unsigned numPoolEntries,
uint8_t* inst, uint8_t* data, PoolEntry* pe = nullptr,
bool markAsBranch = false) {
bool markAsBranch = false)
{
// The allocation of pool entries is not supported in a no-pool region;
// check.
MOZ_ASSERT_IF(numPoolEntries, !canNotPlacePool_);
@@ -569,7 +603,7 @@ struct AssemblerBufferWithConstantPools : public AssemblerBuffer<SliceSize, Inst
*pe = retPE;
if (markAsBranch)
markNextAsBranch();
return this->putBlob(numInst * InstSize, inst);
return this->putBytes(numInst * InstSize, inst);
}
BufferOffset putInt(uint32_t value, bool markAsBranch = false) {
@@ -614,7 +648,7 @@ struct AssemblerBufferWithConstantPools : public AssemblerBuffer<SliceSize, Inst
BufferOffset branch = this->nextOffset();
// Mark and emit the guard branch.
markNextAsBranch();
this->putBlob(guardSize_ * InstSize, nullptr);
this->putBytes(guardSize_ * InstSize, nullptr);
BufferOffset afterPool = this->nextOffset();
Asm::WritePoolGuard(branch, this->getInst(branch), afterPool);
@@ -751,9 +785,7 @@ struct AssemblerBufferWithConstantPools : public AssemblerBuffer<SliceSize, Inst
return poolInfo_[cur - 1].finalPos - poolInfo_[cur - 1].offset;
}
void align(unsigned alignment)
{
// Restrict the alignment to a power of two for now.
void align(unsigned alignment) {
MOZ_ASSERT(IsPowerOfTwo(alignment));
// A pool may need to be dumped at this point, so insert NOP fill here.
@ -819,7 +851,7 @@ struct AssemblerBufferWithConstantPools : public AssemblerBuffer<SliceSize, Inst
size_t curInstOffset = 0;
for (BufferSlice* cur = getHead(); cur != nullptr; cur = cur->getNext()) {
uint32_t* src = (uint32_t*)&cur->instructions;
unsigned numInsts = cur->size() / InstSize;
unsigned numInsts = cur->length() / InstSize;
for (unsigned idx = 0; idx < numInsts; idx++, curInstOffset += InstSize) {
// Is the current instruction a branch?
if (cur->isBranch(idx)) {
@@ -863,6 +895,7 @@ struct AssemblerBufferWithConstantPools : public AssemblerBuffer<SliceSize, Inst
}
};
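A rough sketch of driving the allocEntry() entry point above (the template
arguments, encodings, and values are all invented; the real callers are the
per-architecture assemblers, which pass a pc-relative load as inst and its
constant as data):

void example(AssemblerBufferWithConstantPools<1024, 4, Inst, Asm>& buf) {
    typedef AssemblerBufferWithConstantPools<1024, 4, Inst, Asm> PoolBuffer;
    PoolBuffer::PoolEntry pe;
    uint32_t loadInst = 0;             // placeholder load, patched at dump time
    uint32_t constant = 0xdeadbeef;    // value the load reaches via its pool entry
    buf.allocEntry(/* numInst = */ 1, /* numPoolEntries = */ 1,
                   (uint8_t*)&loadInst, (uint8_t*)&constant, &pe);
}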
} // ion
} // js
#endif /* jit_shared_IonAssemblerBufferWithConstantPools_h */
} // namespace jit
} // namespace js
#endif // jit_shared_IonAssemblerBufferWithConstantPools_h