/*
 * Copyright (C) 2008-2019 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#pragma once

#if ENABLE(ASSEMBLER)

#include "ExecutableAllocator.h"
#include "JITCompilationEffort.h"
#include <stdint.h>
#include <string.h>
#include <wtf/Assertions.h>
#include <wtf/FastMalloc.h>
#if CPU(ARM64E)
#include <wtf/PtrTag.h>
#endif
#include <wtf/StdLibExtras.h>
#include <wtf/ThreadSpecific.h>
#include <wtf/UnalignedAccess.h>

namespace JSC {

class AssemblerData;

typedef ThreadSpecific<AssemblerData, WTF::CanBeGCThread::True> ThreadSpecificAssemblerData;

JS_EXPORT_PRIVATE ThreadSpecificAssemblerData& threadSpecificAssemblerData();
JS_EXPORT_PRIVATE ThreadSpecificAssemblerData& threadSpecificAssemblerHashes();

class LinkBuffer;

DECLARE_ALLOCATOR_WITH_HEAP_IDENTIFIER(AssemblerData);
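
// AssemblerLabel names a byte offset within the assembler buffer. A
// default-constructed label is "unset" (offset == UINT32_MAX). On ARM64E the
// offset is stored PAC-signed, using the label's own address as the
// diversifier, so a label that is corrupted or copied bytewise to another
// address fails authentication when offset() untags it.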
struct AssemblerLabel {
    inline AssemblerLabel() { setOffset(std::numeric_limits<uint32_t>::max()); }
    inline AssemblerLabel(const AssemblerLabel& other) { setOffset(other.offset()); }
    inline AssemblerLabel(AssemblerLabel&& other) { setOffset(other.offset()); }
    inline explicit AssemblerLabel(uint32_t offset) { setOffset(offset); }

    AssemblerLabel& operator=(const AssemblerLabel& other) { setOffset(other.offset()); return *this; }
    AssemblerLabel& operator=(AssemblerLabel&& other) { setOffset(other.offset()); return *this; }

    bool isSet() const { return (offset() != std::numeric_limits<uint32_t>::max()); }

    inline AssemblerLabel labelAtOffset(int offset) const
    {
        return AssemblerLabel(this->offset() + offset);
    }

    bool operator==(const AssemblerLabel& other) const { return offset() == other.offset(); }

    inline uint32_t offset() const
    {
#if CPU(ARM64E)
        return static_cast<uint32_t>(untagInt(m_offset, bitwise_cast<PtrTag>(this)));
#else
        return m_offset;
#endif
    }

private:
    inline void setOffset(uint32_t offset)
    {
#if CPU(ARM64E)
        m_offset = tagInt(static_cast<uint64_t>(offset), bitwise_cast<PtrTag>(this));
#else
        m_offset = offset;
#endif
    }

#if CPU(ARM64E)
    uint64_t m_offset;
#else
    uint32_t m_offset;
#endif
};
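
// AssemblerData is the growable byte store behind AssemblerBuffer. It keeps a
// 128-byte inline buffer so small stubs never touch the heap; only when code
// outgrows InlineCapacity does it fall back to AssemblerDataMalloc. Moved-from
// instances are reset to their inline buffer so they remain usable.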
class AssemblerData {
    WTF_MAKE_NONCOPYABLE(AssemblerData);
    static constexpr size_t InlineCapacity = 128;
public:
    AssemblerData()
        : m_buffer(m_inlineBuffer)
        , m_capacity(InlineCapacity)
    {
    }

    AssemblerData(size_t initialCapacity)
    {
        if (initialCapacity <= InlineCapacity) {
            m_capacity = InlineCapacity;
            m_buffer = m_inlineBuffer;
        } else {
            m_capacity = initialCapacity;
            m_buffer = static_cast<char*>(AssemblerDataMalloc::malloc(m_capacity));
        }
    }

    AssemblerData(AssemblerData&& other)
    {
        if (other.isInlineBuffer()) {
            ASSERT(other.m_capacity == InlineCapacity);
            memcpy(m_inlineBuffer, other.m_inlineBuffer, InlineCapacity);
            m_buffer = m_inlineBuffer;
        } else
            m_buffer = other.m_buffer;
        m_capacity = other.m_capacity;

        other.m_buffer = other.m_inlineBuffer;
        other.m_capacity = InlineCapacity;
    }

    AssemblerData& operator=(AssemblerData&& other)
    {
        if (m_buffer && !isInlineBuffer())
            AssemblerDataMalloc::free(m_buffer);

        if (other.isInlineBuffer()) {
            ASSERT(other.m_capacity == InlineCapacity);
            memcpy(m_inlineBuffer, other.m_inlineBuffer, InlineCapacity);
            m_buffer = m_inlineBuffer;
        } else
            m_buffer = other.m_buffer;
        m_capacity = other.m_capacity;

        other.m_buffer = other.m_inlineBuffer;
        other.m_capacity = InlineCapacity;
        return *this;
    }

    void takeBufferIfLarger(AssemblerData&& other)
    {
        if (other.isInlineBuffer())
            return;

        if (m_capacity >= other.m_capacity)
            return;

        if (m_buffer && !isInlineBuffer())
            AssemblerDataMalloc::free(m_buffer);

        m_buffer = other.m_buffer;
        m_capacity = other.m_capacity;

        other.m_buffer = other.m_inlineBuffer;
        other.m_capacity = InlineCapacity;
    }

    ~AssemblerData()
    {
        clear();
    }

    void clear()
    {
        if (m_buffer && !isInlineBuffer()) {
            AssemblerDataMalloc::free(m_buffer);
            m_capacity = InlineCapacity;
            m_buffer = m_inlineBuffer;
        }
    }

    char* buffer() const { return m_buffer; }

    unsigned capacity() const { return m_capacity; }

    void grow(unsigned extraCapacity = 0)
    {
        m_capacity = m_capacity + m_capacity / 2 + extraCapacity;
        if (isInlineBuffer()) {
            m_buffer = static_cast<char*>(AssemblerDataMalloc::malloc(m_capacity));
            memcpy(m_buffer, m_inlineBuffer, InlineCapacity);
        } else
            m_buffer = static_cast<char*>(AssemblerDataMalloc::realloc(m_buffer, m_capacity));
    }

private:
    bool isInlineBuffer() const { return m_buffer == m_inlineBuffer; }
    char* m_buffer;
    char m_inlineBuffer[InlineCapacity];
    unsigned m_capacity;
};
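
// ARM64EHash maintains a rolling 32-bit checksum over every instruction word
// emitted on ARM64E. update() folds each new word into the previous hash with
// two PAC tagInt operations under different diversifiers, so reproducing a
// matching hash stream is hard without the PAC keys.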
#if CPU(ARM64E)
class ARM64EHash {
public:
    ARM64EHash(uint32_t initialHash)
        : m_hash(initialHash)
    {
    }

    ALWAYS_INLINE uint32_t update(uint32_t value)
    {
        uint64_t input = value ^ m_hash;
        uint64_t a = static_cast<uint32_t>(tagInt(input, static_cast<PtrTag>(0)) >> 39);
        uint64_t b = tagInt(input, static_cast<PtrTag>(0xb7e151628aed2a6a)) >> 23;
        m_hash = a ^ b;
        return m_hash;
    }

private:
    uint32_t m_hash;
};
#endif
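
// AssemblerBuffer is the append-only stream the macro assemblers emit machine
// code into. Both the code storage and (on ARM64E) the parallel hash buffer
// are recycled through thread-specific caches, so a thread that JITs
// repeatedly reuses its largest previous allocation instead of growing from
// the inline buffer every time.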
class AssemblerBuffer {
public:
    AssemblerBuffer()
        : m_storage()
        , m_index(0)
#if CPU(ARM64E)
        , m_hash(static_cast<uint32_t>(bitwise_cast<uint64_t>(this)))
        , m_hashes()
#endif
    {
        auto& threadSpecificData = threadSpecificAssemblerData();
        m_storage.takeBufferIfLarger(WTFMove(*threadSpecificData));
#if CPU(ARM64E)
        auto& threadSpecificHashes = threadSpecificAssemblerHashes();
        m_hashes.takeBufferIfLarger(WTFMove(*threadSpecificHashes));
        ASSERT(m_storage.capacity() == m_hashes.capacity());
#endif
    }

    ~AssemblerBuffer()
    {
#if CPU(ARM64E)
        ASSERT(m_storage.capacity() == m_hashes.capacity());
        auto& threadSpecificHashes = threadSpecificAssemblerHashes();
        threadSpecificHashes->takeBufferIfLarger(WTFMove(m_hashes));
#endif
        auto& threadSpecificData = threadSpecificAssemblerData();
        threadSpecificData->takeBufferIfLarger(WTFMove(m_storage));
    }

    bool isAvailable(unsigned space)
    {
        return m_index + space <= m_storage.capacity();
    }

    void ensureSpace(unsigned space)
    {
        while (!isAvailable(space))
            outOfLineGrow();
    }

    bool isAligned(int alignment) const
    {
        return !(m_index & (alignment - 1));
    }
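
    // ensureSpace() is the bounds-check contract behind the *Unchecked
    // writers below: callers reserve the worst-case byte count once, then
    // emit with the unchecked putters and no per-write capacity test.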

#if !CPU(ARM64)
    void putByteUnchecked(int8_t value) { putIntegralUnchecked(value); }
    void putByte(int8_t value) { putIntegral(value); }
    void putShortUnchecked(int16_t value) { putIntegralUnchecked(value); }
    void putShort(int16_t value) { putIntegral(value); }
    void putInt64Unchecked(int64_t value) { putIntegralUnchecked(value); }
    void putInt64(int64_t value) { putIntegral(value); }
#endif
    void putIntUnchecked(int32_t value) { putIntegralUnchecked(value); }
    void putInt(int32_t value) { putIntegral(value); }

    size_t codeSize() const
    {
        return m_index;
    }

#if !CPU(ARM64)
    void setCodeSize(size_t index)
    {
        // Warning: Only use this if you know exactly what you are doing.
        // For example, if you want 40 bytes of nops, it is OK to grow the
        // code size first and then backfill those 40 bytes using larger
        // nop instructions.
        m_index = index;
        ASSERT(m_index <= m_storage.capacity());
    }
#endif

    AssemblerLabel label() const
    {
        return AssemblerLabel(m_index);
    }

    unsigned debugOffset() { return m_index; }

    AssemblerData&& releaseAssemblerData()
    {
        return WTFMove(m_storage);
    }

#if CPU(ARM64E)
    AssemblerData&& releaseAssemblerHashes()
    {
        return WTFMove(m_hashes);
    }
#endif

    // LocalWriter is a trick to keep the storage buffer pointer and the write
    // index cached in local state (and thus likely in registers) while
    // issuing multiple stores. It is created in a block scope, and its cached
    // state stays live between writes.
    //
    // LocalWriter *CANNOT* be mixed with other kinds of access to the
    // AssemblerBuffer: the AssemblerBuffer cannot be used until its
    // LocalWriter goes out of scope.
#if !CPU(ARM64) // If we ever need to use this on arm64e, we would need to make the checksum aware of this.
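    // A minimal usage sketch (all names are illustrative):
    //
    //     {
    //         AssemblerBuffer::LocalWriter writer(buffer, 2 * sizeof(int32_t));
    //         writer.putIntUnchecked(firstWord);  // space was reserved up front,
    //         writer.putIntUnchecked(secondWord); // so no per-write bounds checks
    //     } // the destructor flushes the cursor back into `buffer`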
    class LocalWriter {
    public:
        LocalWriter(AssemblerBuffer& buffer, unsigned requiredSpace)
            : m_buffer(buffer)
        {
            buffer.ensureSpace(requiredSpace);
            m_storageBuffer = buffer.m_storage.buffer();
            m_index = buffer.m_index;
#if ASSERT_ENABLED
            m_initialIndex = m_index;
            m_requiredSpace = requiredSpace;
#endif
        }

        ~LocalWriter()
        {
            ASSERT(m_index - m_initialIndex <= m_requiredSpace);
            ASSERT(m_buffer.m_index == m_initialIndex);
            ASSERT(m_storageBuffer == m_buffer.m_storage.buffer());
            m_buffer.m_index = m_index;
        }

        void putByteUnchecked(int8_t value) { putIntegralUnchecked(value); }
        void putShortUnchecked(int16_t value) { putIntegralUnchecked(value); }
        void putIntUnchecked(int32_t value) { putIntegralUnchecked(value); }
        void putInt64Unchecked(int64_t value) { putIntegralUnchecked(value); }
    private:
        template<typename IntegralType>
        void putIntegralUnchecked(IntegralType value)
        {
            ASSERT(m_index + sizeof(IntegralType) <= m_buffer.m_storage.capacity());
            WTF::unalignedStore<IntegralType>(m_storageBuffer + m_index, value);
            m_index += sizeof(IntegralType);
        }
        AssemblerBuffer& m_buffer;
        char* m_storageBuffer;
        unsigned m_index;
#if ASSERT_ENABLED
        unsigned m_initialIndex;
        unsigned m_requiredSpace;
#endif
    };
#endif // !CPU(ARM64)

#if !CPU(ARM64) // If we were to define this on arm64e, we'd need a way to update the hash as we write directly into the buffer.
    void* data() const { return m_storage.buffer(); }
#endif

protected:
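    // putIntegral() bounds-checks and takes the out-of-line grow path only on
    // overflow; putIntegralUnchecked() assumes the caller already reserved
    // space via ensureSpace(). On ARM64 every write must be a 4-byte
    // instruction word, and on ARM64E each word also updates the rolling hash
    // mirrored into m_hashes at the same offset.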
    template<typename IntegralType>
    void putIntegral(IntegralType value)
    {
        unsigned nextIndex = m_index + sizeof(IntegralType);
        if (UNLIKELY(nextIndex > m_storage.capacity()))
            outOfLineGrow();
        putIntegralUnchecked<IntegralType>(value);
    }

    template<typename IntegralType>
    void putIntegralUnchecked(IntegralType value)
    {
#if CPU(ARM64)
        static_assert(sizeof(value) == 4, "");
#if CPU(ARM64E)
        uint32_t hash = m_hash.update(value);
        WTF::unalignedStore<uint32_t>(m_hashes.buffer() + m_index, hash);
#endif
#endif
        ASSERT(isAvailable(sizeof(IntegralType)));
        WTF::unalignedStore<IntegralType>(m_storage.buffer() + m_index, value);
        m_index += sizeof(IntegralType);
    }

private:
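    // Growth delegates to AssemblerData::grow(), which expands capacity by
    // roughly 1.5x plus any requested extra, keeping the code and hash
    // buffers the same size on ARM64E.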
    void grow(int extraCapacity = 0)
    {
        m_storage.grow(extraCapacity);
#if CPU(ARM64E)
        m_hashes.grow(extraCapacity);
#endif
    }

    NEVER_INLINE void outOfLineGrow()
    {
        m_storage.grow();
#if CPU(ARM64E)
        m_hashes.grow();
#endif
    }

#if !CPU(ARM64)
    friend LocalWriter;
#endif
    friend LinkBuffer;

    AssemblerData m_storage;
    unsigned m_index;
#if CPU(ARM64E)
    ARM64EHash m_hash;
    AssemblerData m_hashes;
#endif
};

} // namespace JSC

#endif // ENABLE(ASSEMBLER)