/*
* Copyright (C) 2023 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
// Autogenerated, do not modify.
#pragma once
#include "ArithProfile.h"
#include "BytecodeDumper.h"
#include "Fits.h"
#include "GetByIdMetadata.h"
#include "GetByValHistory.h"
#include "Instruction.h"
#include "IterationModeMetadata.h"
#include "Opcode.h"
#include "PrivateFieldPutKind.h"
#include "PutByIdStatus.h"
#include "PutByIdFlags.h"
#include "ToThisStatus.h"
namespace JSC {
void dumpBytecode(BytecodeDumperBase* dumper, InstructionStream::Offset, const Instruction*);
#if ENABLE(WEBASSEMBLY)
void dumpWasm(BytecodeDumperBase* dumper, InstructionStream::Offset, const Instruction*);
#endif // ENABLE(WEBASSEMBLY)
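// Every Op* struct below follows the same encoding scheme. A narrow
// instruction stores the opcode and each operand in a single byte; when an
// operand does not fit, the emitter retries with 16-bit and then 32-bit
// operands, aligning the stream and prefixing it with op_wide16 or
// op_wide32. A sketch of the resulting layouts (field widths in bytes, not
// literal values):
//
//   Narrow: [opcode:1][operand:1]...
//   Wide16: [op_wide16:1][opcode:1][operand:2]...
//   Wide32: [op_wide32:1][opcode:1][operand:4]...
//
// The emit() overloads choose the smallest layout whose Fits<> checks all
// pass; see emitWithSmallestSizeRequirement() below.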
struct OpIteratorOpen : public Instruction {
static constexpr OpcodeID opcodeID = op_iterator_open;
static constexpr size_t length = 7;
enum Checkpoints : uint8_t {
symbolCall,
getNext,
numberOfCheckpoints,
};
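// Checkpoints name the observable sub-steps of this instruction (here the
// call to the Symbol.iterator method, then the load of the iterator's
// "next" property), so the VM can exit and resume at a sub-step boundary
// instead of re-running the whole instruction.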
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister iterator, VirtualRegister next, VirtualRegister symbolIterator, VirtualRegister iterable, unsigned stackOffset)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, iterator, next, symbolIterator, iterable, stackOffset);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert>
static bool emit(BytecodeGenerator* gen, VirtualRegister iterator, VirtualRegister next, VirtualRegister symbolIterator, VirtualRegister iterable, unsigned stackOffset)
{
auto __metadataID = gen->addMetadataFor(opcodeID);
return emit<__size, BytecodeGenerator, shouldAssert>(gen, iterator, next, symbolIterator, iterable, stackOffset, __metadataID);
}
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkWithoutMetadataID(BytecodeGenerator* gen, VirtualRegister iterator, VirtualRegister next, VirtualRegister symbolIterator, VirtualRegister iterable, unsigned stackOffset)
{
decltype(gen->addMetadataFor(opcodeID)) __metadataID { };
return checkImpl<__size, BytecodeGenerator>(gen, iterator, next, symbolIterator, iterable, stackOffset, __metadataID);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister iterator, VirtualRegister next, VirtualRegister symbolIterator, VirtualRegister iterable, unsigned stackOffset, unsigned __metadataID)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, iterator, next, symbolIterator, iterable, stackOffset, __metadataID);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister iterator, VirtualRegister next, VirtualRegister symbolIterator, VirtualRegister iterable, unsigned stackOffset)
{
auto __metadataID = gen->addMetadataFor(opcodeID);
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, iterator, next, symbolIterator, iterable, stackOffset, __metadataID))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, iterator, next, symbolIterator, iterable, stackOffset, __metadataID))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, iterator, next, symbolIterator, iterable, stackOffset, __metadataID);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& iterator, VirtualRegister& next, VirtualRegister& symbolIterator, VirtualRegister& iterable, unsigned& stackOffset, unsigned __metadataID)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<OpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(iterator)
&& Fits<VirtualRegister, __size>::check(next)
&& Fits<VirtualRegister, __size>::check(symbolIterator)
&& Fits<VirtualRegister, __size>::check(iterable)
&& Fits<unsigned, __size>::check(stackOffset)
&& Fits<unsigned, __size>::check(__metadataID)
&& (__size == OpcodeSize::Wide16 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide32) : true);
}
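// Fits<T, __size>::check above answers whether a value is representable in
// the operand width being tried; the trailing op_wide16/op_wide32 conjuncts
// additionally require that the prefix opcode itself fits in a narrow slot,
// since the prefix is always written narrow (see emitImpl below).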
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister iterator, VirtualRegister next, VirtualRegister symbolIterator, VirtualRegister iterable, unsigned stackOffset, unsigned __metadataID)
{
gen->setUsesCheckpoints();
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, iterator, next, symbolIterator, iterable, stackOffset, __metadataID)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide32));
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(iterator));
gen->write(Fits<VirtualRegister, __size>::convert(next));
gen->write(Fits<VirtualRegister, __size>::convert(symbolIterator));
gen->write(Fits<VirtualRegister, __size>::convert(iterable));
gen->write(Fits<unsigned, __size>::convert(stackOffset));
gen->write(Fits<unsigned, __size>::convert(__metadataID));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**iterator_open"[2 - __sizeShiftAmount]);
dumper->dumpOperand("iterator", m_iterator, true);
dumper->dumpOperand("next", m_next, false);
dumper->dumpOperand("symbolIterator", m_symbolIterator, false);
dumper->dumpOperand("iterable", m_iterable, false);
dumper->dumpOperand("stackOffset", m_stackOffset, false);
}
OpIteratorOpen(const uint8_t* stream)
: m_iterator(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_next(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[1]))
, m_symbolIterator(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[2]))
, m_iterable(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[3]))
, m_stackOffset(Fits<unsigned, OpcodeSize::Narrow>::convert(stream[4]))
, m_metadataID(Fits<unsigned, OpcodeSize::Narrow>::convert(stream[5]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpIteratorOpen(const uint16_t* stream)
: m_iterator(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_next(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[1]))
, m_symbolIterator(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[2]))
, m_iterable(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[3]))
, m_stackOffset(Fits<unsigned, OpcodeSize::Wide16>::convert(stream[4]))
, m_metadataID(Fits<unsigned, OpcodeSize::Wide16>::convert(stream[5]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpIteratorOpen(const uint32_t* stream)
: m_iterator(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_next(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[1]))
, m_symbolIterator(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[2]))
, m_iterable(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[3]))
, m_stackOffset(Fits<unsigned, OpcodeSize::Wide32>::convert(stream[4]))
, m_metadataID(Fits<unsigned, OpcodeSize::Wide32>::convert(stream[5]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static OpIteratorOpen decode(const uint8_t* stream)
{
// The incoming stream points at the start of the instruction (wide prefix or opcode); the constructors receive a pointer to the first operand, past the prefix and opcode.
if (*stream == op_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == op_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
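// A usage sketch (the pointer name and how it is obtained are illustrative,
// not taken from a real call site): decode() rebuilds the operand struct
// from whichever encoding it finds at the instruction start.
//
//   const uint8_t* instruction = ...; // at the wide prefix or opcode byte
//   OpIteratorOpen bytecode = OpIteratorOpen::decode(instruction);
//   VirtualRegister iterator = bytecode.m_iterator;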
template<typename Functor>
void setIterator(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setIterator<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setIterator<OpcodeSize::Wide16>(value, func);
else
setIterator<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setIterator(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
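// The address arithmetic above patches an operand in place: starting from
// this (the first byte of the instruction), it skips the wide prefix
// (PaddingBySize<size>::value bytes), the opcode byte (+ 1), and any
// earlier operands (index * size bytes). For example, rewriting the next
// operand (index 1) of a Wide32-encoded instruction writes at
// this + 1 * 4 + PaddingBySize<OpcodeSize::Wide32>::value + 1.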
template<typename Functor>
void setNext(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setNext<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setNext<OpcodeSize::Wide16>(value, func);
else
setNext<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setNext(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setSymbolIterator(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setSymbolIterator<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setSymbolIterator<OpcodeSize::Wide16>(value, func);
else
setSymbolIterator<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setSymbolIterator(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 2 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setIterable(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setIterable<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setIterable<OpcodeSize::Wide16>(value, func);
else
setIterable<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setIterable(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 3 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setStackOffset(unsigned value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setStackOffset<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setStackOffset<OpcodeSize::Wide16>(value, func);
else
setStackOffset<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setStackOffset(unsigned value, Functor func)
{
if (!Fits<unsigned, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 4 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<unsigned, size>::convert(value);
}
struct Metadata {
WTF_MAKE_NONCOPYABLE(Metadata);
public:
static constexpr OpcodeID opcodeID = op_iterator_open;
Metadata(const OpIteratorOpen&) { }
IterationModeMetadata m_iterationMetadata;
ValueProfile m_iterableProfile;
LLIntCallLinkInfo m_callLinkInfo;
ValueProfile m_iteratorProfile;
GetByIdModeMetadata m_modeMetadata;
ValueProfile m_nextProfile;
};
Metadata& metadata(CodeBlock* codeBlock) const
{
return codeBlock->metadata<Metadata>(opcodeID, m_metadataID);
}
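// Metadata lives out of line in the CodeBlock's metadata table, keyed by
// opcode and m_metadataID, so the instruction stream stays compact and
// immutable while the profiles mutate at run time. A hypothetical read
// (the variable names are illustrative):
//
//   auto& metadata = bytecode.metadata(codeBlock);
//   GetByIdModeMetadata& modeMetadata = metadata.m_modeMetadata;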
VirtualRegister m_iterator;
VirtualRegister m_next;
VirtualRegister m_symbolIterator;
VirtualRegister m_iterable;
unsigned m_stackOffset;
unsigned m_metadataID;
};
static_assert(OpIteratorOpen::length > OpIteratorOpen::numberOfCheckpoints, "FullBytecodeLiveness relies on the length of OpIteratorOpen being greater than the number of checkpoints");
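// A hedged emission sketch (the generator pointer and register names are
// illustrative; real call sites live in the bytecode compiler): the
// non-templated emit() starts narrow and widens only as the operands
// demand.
//
//   OpIteratorOpen::emit(generator, iteratorReg, nextReg, symbolIteratorReg,
//       iterableReg, stackOffset);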
struct OpTailCallVarargs : public Instruction {
static constexpr OpcodeID opcodeID = op_tail_call_varargs;
static constexpr size_t length = 8;
enum Tmps : uint8_t {
argCountIncludingThis,
};
enum Checkpoints : uint8_t {
determiningArgCount,
makeCall,
numberOfCheckpoints,
};
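// Varargs calls need two checkpoints because the argument count is only
// known at run time: the count computed at determiningArgCount is carried
// in the argCountIncludingThis tmp into makeCall.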
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister callee, VirtualRegister thisValue, VirtualRegister arguments, VirtualRegister firstFree, int firstVarArg)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, dst, callee, thisValue, arguments, firstFree, firstVarArg);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister callee, VirtualRegister thisValue, VirtualRegister arguments, VirtualRegister firstFree, int firstVarArg)
{
auto __metadataID = gen->addMetadataFor(opcodeID);
return emit<__size, BytecodeGenerator, shouldAssert>(gen, dst, callee, thisValue, arguments, firstFree, firstVarArg, __metadataID);
}
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkWithoutMetadataID(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister callee, VirtualRegister thisValue, VirtualRegister arguments, VirtualRegister firstFree, int firstVarArg)
{
decltype(gen->addMetadataFor(opcodeID)) __metadataID { };
return checkImpl<__size, BytecodeGenerator>(gen, dst, callee, thisValue, arguments, firstFree, firstVarArg, __metadataID);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister callee, VirtualRegister thisValue, VirtualRegister arguments, VirtualRegister firstFree, int firstVarArg, unsigned __metadataID)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, dst, callee, thisValue, arguments, firstFree, firstVarArg, __metadataID);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister callee, VirtualRegister thisValue, VirtualRegister arguments, VirtualRegister firstFree, int firstVarArg)
{
auto __metadataID = gen->addMetadataFor(opcodeID);
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, dst, callee, thisValue, arguments, firstFree, firstVarArg, __metadataID))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, dst, callee, thisValue, arguments, firstFree, firstVarArg, __metadataID))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, dst, callee, thisValue, arguments, firstFree, firstVarArg, __metadataID);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& dst, VirtualRegister& callee, VirtualRegister& thisValue, VirtualRegister& arguments, VirtualRegister& firstFree, int& firstVarArg, unsigned __metadataID)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<OpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(dst)
&& Fits<VirtualRegister, __size>::check(callee)
&& Fits<VirtualRegister, __size>::check(thisValue)
&& Fits<VirtualRegister, __size>::check(arguments)
&& Fits<VirtualRegister, __size>::check(firstFree)
&& Fits<int, __size>::check(firstVarArg)
&& Fits<unsigned, __size>::check(__metadataID)
&& (__size == OpcodeSize::Wide16 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister callee, VirtualRegister thisValue, VirtualRegister arguments, VirtualRegister firstFree, int firstVarArg, unsigned __metadataID)
{
gen->setUsesCheckpoints();
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, dst, callee, thisValue, arguments, firstFree, firstVarArg, __metadataID)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide32));
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(dst));
gen->write(Fits<VirtualRegister, __size>::convert(callee));
gen->write(Fits<VirtualRegister, __size>::convert(thisValue));
gen->write(Fits<VirtualRegister, __size>::convert(arguments));
gen->write(Fits<VirtualRegister, __size>::convert(firstFree));
gen->write(Fits<int, __size>::convert(firstVarArg));
gen->write(Fits<unsigned, __size>::convert(__metadataID));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**tail_call_varargs"[2 - __sizeShiftAmount]);
dumper->dumpOperand("dst", m_dst, true);
dumper->dumpOperand("callee", m_callee, false);
dumper->dumpOperand("thisValue", m_thisValue, false);
dumper->dumpOperand("arguments", m_arguments, false);
dumper->dumpOperand("firstFree", m_firstFree, false);
dumper->dumpOperand("firstVarArg", m_firstVarArg, false);
}
OpTailCallVarargs(const uint8_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_callee(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[1]))
, m_thisValue(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[2]))
, m_arguments(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[3]))
, m_firstFree(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[4]))
, m_firstVarArg(Fits<int, OpcodeSize::Narrow>::convert(stream[5]))
, m_metadataID(Fits<unsigned, OpcodeSize::Narrow>::convert(stream[6]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpTailCallVarargs(const uint16_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_callee(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[1]))
, m_thisValue(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[2]))
, m_arguments(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[3]))
, m_firstFree(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[4]))
, m_firstVarArg(Fits<int, OpcodeSize::Wide16>::convert(stream[5]))
, m_metadataID(Fits<unsigned, OpcodeSize::Wide16>::convert(stream[6]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpTailCallVarargs(const uint32_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_callee(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[1]))
, m_thisValue(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[2]))
, m_arguments(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[3]))
, m_firstFree(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[4]))
, m_firstVarArg(Fits<int, OpcodeSize::Wide32>::convert(stream[5]))
, m_metadataID(Fits<unsigned, OpcodeSize::Wide32>::convert(stream[6]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static OpTailCallVarargs decode(const uint8_t* stream)
{
// The incoming stream points at the start of the instruction (wide prefix or opcode); the constructors receive a pointer to the first operand, past the prefix and opcode.
if (*stream == op_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == op_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setDst<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setDst<OpcodeSize::Wide16>(value, func);
else
setDst<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setCallee(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setCallee<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setCallee<OpcodeSize::Wide16>(value, func);
else
setCallee<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setCallee(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setThisValue(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setThisValue<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setThisValue<OpcodeSize::Wide16>(value, func);
else
setThisValue<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setThisValue(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 2 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setArguments(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setArguments<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setArguments<OpcodeSize::Wide16>(value, func);
else
setArguments<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setArguments(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 3 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setFirstFree(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setFirstFree<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setFirstFree<OpcodeSize::Wide16>(value, func);
else
setFirstFree<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setFirstFree(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 4 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setFirstVarArg(int value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setFirstVarArg<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setFirstVarArg<OpcodeSize::Wide16>(value, func);
else
setFirstVarArg<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setFirstVarArg(int value, Functor func)
{
if (!Fits<int, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 5 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<int, size>::convert(value);
}
struct Metadata {
WTF_MAKE_NONCOPYABLE(Metadata);
public:
static constexpr OpcodeID opcodeID = op_tail_call_varargs;
Metadata(const OpTailCallVarargs&) { }
ArrayProfile m_arrayProfile;
ValueProfile m_profile;
};
Metadata& metadata(CodeBlock* codeBlock) const
{
return codeBlock->metadata<Metadata>(opcodeID, m_metadataID);
}
VirtualRegister m_dst;
VirtualRegister m_callee;
VirtualRegister m_thisValue;
VirtualRegister m_arguments;
VirtualRegister m_firstFree;
int m_firstVarArg;
unsigned m_metadataID;
};
static_assert(OpTailCallVarargs::length > OpTailCallVarargs::numberOfCheckpoints, "FullBytecodeLiveness relies on the length of OpTailCallVarargs being greater than the number of checkpoints");
struct OpConstructVarargs : public Instruction {
static constexpr OpcodeID opcodeID = op_construct_varargs;
static constexpr size_t length = 8;
enum Tmps : uint8_t {
argCountIncludingThis,
};
enum Checkpoints : uint8_t {
determiningArgCount,
makeCall,
numberOfCheckpoints,
};
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister callee, VirtualRegister thisValue, VirtualRegister arguments, VirtualRegister firstFree, int firstVarArg)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, dst, callee, thisValue, arguments, firstFree, firstVarArg);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister callee, VirtualRegister thisValue, VirtualRegister arguments, VirtualRegister firstFree, int firstVarArg)
{
auto __metadataID = gen->addMetadataFor(opcodeID);
return emit<__size, BytecodeGenerator, shouldAssert>(gen, dst, callee, thisValue, arguments, firstFree, firstVarArg, __metadataID);
}
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkWithoutMetadataID(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister callee, VirtualRegister thisValue, VirtualRegister arguments, VirtualRegister firstFree, int firstVarArg)
{
decltype(gen->addMetadataFor(opcodeID)) __metadataID { };
return checkImpl<__size, BytecodeGenerator>(gen, dst, callee, thisValue, arguments, firstFree, firstVarArg, __metadataID);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister callee, VirtualRegister thisValue, VirtualRegister arguments, VirtualRegister firstFree, int firstVarArg, unsigned __metadataID)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, dst, callee, thisValue, arguments, firstFree, firstVarArg, __metadataID);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister callee, VirtualRegister thisValue, VirtualRegister arguments, VirtualRegister firstFree, int firstVarArg)
{
auto __metadataID = gen->addMetadataFor(opcodeID);
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, dst, callee, thisValue, arguments, firstFree, firstVarArg, __metadataID))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, dst, callee, thisValue, arguments, firstFree, firstVarArg, __metadataID))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, dst, callee, thisValue, arguments, firstFree, firstVarArg, __metadataID);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& dst, VirtualRegister& callee, VirtualRegister& thisValue, VirtualRegister& arguments, VirtualRegister& firstFree, int& firstVarArg, unsigned __metadataID)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<OpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(dst)
&& Fits<VirtualRegister, __size>::check(callee)
&& Fits<VirtualRegister, __size>::check(thisValue)
&& Fits<VirtualRegister, __size>::check(arguments)
&& Fits<VirtualRegister, __size>::check(firstFree)
&& Fits<int, __size>::check(firstVarArg)
&& Fits<unsigned, __size>::check(__metadataID)
&& (__size == OpcodeSize::Wide16 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister callee, VirtualRegister thisValue, VirtualRegister arguments, VirtualRegister firstFree, int firstVarArg, unsigned __metadataID)
{
gen->setUsesCheckpoints();
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, dst, callee, thisValue, arguments, firstFree, firstVarArg, __metadataID)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide32));
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(dst));
gen->write(Fits<VirtualRegister, __size>::convert(callee));
gen->write(Fits<VirtualRegister, __size>::convert(thisValue));
gen->write(Fits<VirtualRegister, __size>::convert(arguments));
gen->write(Fits<VirtualRegister, __size>::convert(firstFree));
gen->write(Fits<int, __size>::convert(firstVarArg));
gen->write(Fits<unsigned, __size>::convert(__metadataID));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**construct_varargs"[2 - __sizeShiftAmount]);
dumper->dumpOperand("dst", m_dst, true);
dumper->dumpOperand("callee", m_callee, false);
dumper->dumpOperand("thisValue", m_thisValue, false);
dumper->dumpOperand("arguments", m_arguments, false);
dumper->dumpOperand("firstFree", m_firstFree, false);
dumper->dumpOperand("firstVarArg", m_firstVarArg, false);
}
OpConstructVarargs(const uint8_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_callee(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[1]))
, m_thisValue(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[2]))
, m_arguments(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[3]))
, m_firstFree(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[4]))
, m_firstVarArg(Fits<int, OpcodeSize::Narrow>::convert(stream[5]))
, m_metadataID(Fits<unsigned, OpcodeSize::Narrow>::convert(stream[6]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpConstructVarargs(const uint16_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_callee(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[1]))
, m_thisValue(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[2]))
, m_arguments(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[3]))
, m_firstFree(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[4]))
, m_firstVarArg(Fits<int, OpcodeSize::Wide16>::convert(stream[5]))
, m_metadataID(Fits<unsigned, OpcodeSize::Wide16>::convert(stream[6]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpConstructVarargs(const uint32_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_callee(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[1]))
, m_thisValue(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[2]))
, m_arguments(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[3]))
, m_firstFree(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[4]))
, m_firstVarArg(Fits<int, OpcodeSize::Wide32>::convert(stream[5]))
, m_metadataID(Fits<unsigned, OpcodeSize::Wide32>::convert(stream[6]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static OpConstructVarargs decode(const uint8_t* stream)
{
// The incoming stream points at the start of the instruction (wide prefix or opcode); the constructors receive a pointer to the first operand, past the prefix and opcode.
if (*stream == op_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == op_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setDst<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setDst<OpcodeSize::Wide16>(value, func);
else
setDst<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setCallee(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setCallee<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setCallee<OpcodeSize::Wide16>(value, func);
else
setCallee<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setCallee(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setThisValue(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setThisValue<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setThisValue<OpcodeSize::Wide16>(value, func);
else
setThisValue<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setThisValue(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 2 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setArguments(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setArguments<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setArguments<OpcodeSize::Wide16>(value, func);
else
setArguments<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setArguments(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 3 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setFirstFree(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setFirstFree<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setFirstFree<OpcodeSize::Wide16>(value, func);
else
setFirstFree<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setFirstFree(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 4 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setFirstVarArg(int value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setFirstVarArg<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setFirstVarArg<OpcodeSize::Wide16>(value, func);
else
setFirstVarArg<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setFirstVarArg(int value, Functor func)
{
if (!Fits<int, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 5 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<int, size>::convert(value);
}
struct Metadata {
WTF_MAKE_NONCOPYABLE(Metadata);
public:
static constexpr OpcodeID opcodeID = op_construct_varargs;
Metadata(const OpConstructVarargs&) { }
ArrayProfile m_arrayProfile;
ValueProfile m_profile;
};
Metadata& metadata(CodeBlock* codeBlock) const
{
return codeBlock->metadata<Metadata>(opcodeID, m_metadataID);
}
VirtualRegister m_dst;
VirtualRegister m_callee;
VirtualRegister m_thisValue;
VirtualRegister m_arguments;
VirtualRegister m_firstFree;
int m_firstVarArg;
unsigned m_metadataID;
};
static_assert(OpConstructVarargs::length > OpConstructVarargs::numberOfCheckpoints, "FullBytecodeLiveness relies on the length of OpConstructVarargs being greater than the number of checkpoints");
struct OpCallVarargs : public Instruction {
static constexpr OpcodeID opcodeID = op_call_varargs;
static constexpr size_t length = 8;
enum Tmps : uint8_t {
argCountIncludingThis,
};
enum Checkpoints : uint8_t {
determiningArgCount,
makeCall,
numberOfCheckpoints,
};
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister callee, VirtualRegister thisValue, VirtualRegister arguments, VirtualRegister firstFree, int firstVarArg)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, dst, callee, thisValue, arguments, firstFree, firstVarArg);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister callee, VirtualRegister thisValue, VirtualRegister arguments, VirtualRegister firstFree, int firstVarArg)
{
auto __metadataID = gen->addMetadataFor(opcodeID);
return emit<__size, BytecodeGenerator, shouldAssert>(gen, dst, callee, thisValue, arguments, firstFree, firstVarArg, __metadataID);
}
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkWithoutMetadataID(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister callee, VirtualRegister thisValue, VirtualRegister arguments, VirtualRegister firstFree, int firstVarArg)
{
decltype(gen->addMetadataFor(opcodeID)) __metadataID { };
return checkImpl<__size, BytecodeGenerator>(gen, dst, callee, thisValue, arguments, firstFree, firstVarArg, __metadataID);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister callee, VirtualRegister thisValue, VirtualRegister arguments, VirtualRegister firstFree, int firstVarArg, unsigned __metadataID)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, dst, callee, thisValue, arguments, firstFree, firstVarArg, __metadataID);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister callee, VirtualRegister thisValue, VirtualRegister arguments, VirtualRegister firstFree, int firstVarArg)
{
auto __metadataID = gen->addMetadataFor(opcodeID);
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, dst, callee, thisValue, arguments, firstFree, firstVarArg, __metadataID))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, dst, callee, thisValue, arguments, firstFree, firstVarArg, __metadataID))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, dst, callee, thisValue, arguments, firstFree, firstVarArg, __metadataID);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& dst, VirtualRegister& callee, VirtualRegister& thisValue, VirtualRegister& arguments, VirtualRegister& firstFree, int& firstVarArg, unsigned __metadataID)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<OpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(dst)
&& Fits<VirtualRegister, __size>::check(callee)
&& Fits<VirtualRegister, __size>::check(thisValue)
&& Fits<VirtualRegister, __size>::check(arguments)
&& Fits<VirtualRegister, __size>::check(firstFree)
&& Fits<int, __size>::check(firstVarArg)
&& Fits<unsigned, __size>::check(__metadataID)
&& (__size == OpcodeSize::Wide16 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister callee, VirtualRegister thisValue, VirtualRegister arguments, VirtualRegister firstFree, int firstVarArg, unsigned __metadataID)
{
gen->setUsesCheckpoints();
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, dst, callee, thisValue, arguments, firstFree, firstVarArg, __metadataID)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide32));
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(dst));
gen->write(Fits<VirtualRegister, __size>::convert(callee));
gen->write(Fits<VirtualRegister, __size>::convert(thisValue));
gen->write(Fits<VirtualRegister, __size>::convert(arguments));
gen->write(Fits<VirtualRegister, __size>::convert(firstFree));
gen->write(Fits<int, __size>::convert(firstVarArg));
gen->write(Fits<unsigned, __size>::convert(__metadataID));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**call_varargs"[2 - __sizeShiftAmount]);
dumper->dumpOperand("dst", m_dst, true);
dumper->dumpOperand("callee", m_callee, false);
dumper->dumpOperand("thisValue", m_thisValue, false);
dumper->dumpOperand("arguments", m_arguments, false);
dumper->dumpOperand("firstFree", m_firstFree, false);
dumper->dumpOperand("firstVarArg", m_firstVarArg, false);
}
OpCallVarargs(const uint8_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_callee(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[1]))
, m_thisValue(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[2]))
, m_arguments(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[3]))
, m_firstFree(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[4]))
, m_firstVarArg(Fits<int, OpcodeSize::Narrow>::convert(stream[5]))
, m_metadataID(Fits<unsigned, OpcodeSize::Narrow>::convert(stream[6]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpCallVarargs(const uint16_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_callee(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[1]))
, m_thisValue(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[2]))
, m_arguments(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[3]))
, m_firstFree(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[4]))
, m_firstVarArg(Fits<int, OpcodeSize::Wide16>::convert(stream[5]))
, m_metadataID(Fits<unsigned, OpcodeSize::Wide16>::convert(stream[6]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpCallVarargs(const uint32_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_callee(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[1]))
, m_thisValue(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[2]))
, m_arguments(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[3]))
, m_firstFree(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[4]))
, m_firstVarArg(Fits<int, OpcodeSize::Wide32>::convert(stream[5]))
, m_metadataID(Fits<unsigned, OpcodeSize::Wide32>::convert(stream[6]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static OpCallVarargs decode(const uint8_t* stream)
{
// The incoming stream points at the start of the instruction (wide prefix or opcode); the constructors receive a pointer to the first operand, past the prefix and opcode.
if (*stream == op_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == op_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setDst<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setDst<OpcodeSize::Wide16>(value, func);
else
setDst<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setCallee(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setCallee<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setCallee<OpcodeSize::Wide16>(value, func);
else
setCallee<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setCallee(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setThisValue(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setThisValue<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setThisValue<OpcodeSize::Wide16>(value, func);
else
setThisValue<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setThisValue(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 2 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setArguments(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setArguments<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setArguments<OpcodeSize::Wide16>(value, func);
else
setArguments<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setArguments(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 3 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setFirstFree(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setFirstFree<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setFirstFree<OpcodeSize::Wide16>(value, func);
else
setFirstFree<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setFirstFree(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 4 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setFirstVarArg(int value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setFirstVarArg<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setFirstVarArg<OpcodeSize::Wide16>(value, func);
else
setFirstVarArg<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setFirstVarArg(int value, Functor func)
{
if (!Fits<int, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 5 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<int, size>::convert(value);
}
struct Metadata {
WTF_MAKE_NONCOPYABLE(Metadata);
public:
static constexpr OpcodeID opcodeID = op_call_varargs;
Metadata(const OpCallVarargs&) { }
ArrayProfile m_arrayProfile;
ValueProfile m_profile;
};
Metadata& metadata(CodeBlock* codeBlock) const
{
return codeBlock->metadata<Metadata>(opcodeID, m_metadataID);
}
VirtualRegister m_dst;
VirtualRegister m_callee;
VirtualRegister m_thisValue;
VirtualRegister m_arguments;
VirtualRegister m_firstFree;
int m_firstVarArg;
unsigned m_metadataID;
};
static_assert(OpCallVarargs::length > OpCallVarargs::numberOfCheckpoints, "FullBytecodeLiveness relies on the length of OpCallVarargs being greater than the number of checkpoints");
struct OpIteratorNext : public Instruction {
static constexpr OpcodeID opcodeID = op_iterator_next;
static constexpr size_t length = 8;
enum Tmps : uint8_t {
nextResult,
};
enum Checkpoints : uint8_t {
computeNext,
getDone,
getValue,
numberOfCheckpoints,
};
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister done, VirtualRegister value, VirtualRegister iterable, VirtualRegister next, VirtualRegister iterator, unsigned stackOffset)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, done, value, iterable, next, iterator, stackOffset);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert>
static bool emit(BytecodeGenerator* gen, VirtualRegister done, VirtualRegister value, VirtualRegister iterable, VirtualRegister next, VirtualRegister iterator, unsigned stackOffset)
{
auto __metadataID = gen->addMetadataFor(opcodeID);
return emit<__size, BytecodeGenerator, shouldAssert>(gen, done, value, iterable, next, iterator, stackOffset, __metadataID);
}
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkWithoutMetadataID(BytecodeGenerator* gen, VirtualRegister done, VirtualRegister value, VirtualRegister iterable, VirtualRegister next, VirtualRegister iterator, unsigned stackOffset)
{
decltype(gen->addMetadataFor(opcodeID)) __metadataID { };
return checkImpl<__size, BytecodeGenerator>(gen, done, value, iterable, next, iterator, stackOffset, __metadataID);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister done, VirtualRegister value, VirtualRegister iterable, VirtualRegister next, VirtualRegister iterator, unsigned stackOffset, unsigned __metadataID)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, done, value, iterable, next, iterator, stackOffset, __metadataID);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister done, VirtualRegister value, VirtualRegister iterable, VirtualRegister next, VirtualRegister iterator, unsigned stackOffset)
{
auto __metadataID = gen->addMetadataFor(opcodeID);
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, done, value, iterable, next, iterator, stackOffset, __metadataID))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, done, value, iterable, next, iterator, stackOffset, __metadataID))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, done, value, iterable, next, iterator, stackOffset, __metadataID);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& done, VirtualRegister& value, VirtualRegister& iterable, VirtualRegister& next, VirtualRegister& iterator, unsigned& stackOffset, unsigned __metadataID)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<OpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(done)
&& Fits<VirtualRegister, __size>::check(value)
&& Fits<VirtualRegister, __size>::check(iterable)
&& Fits<VirtualRegister, __size>::check(next)
&& Fits<VirtualRegister, __size>::check(iterator)
&& Fits<unsigned, __size>::check(stackOffset)
&& Fits<unsigned, __size>::check(__metadataID)
&& (__size == OpcodeSize::Wide16 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister done, VirtualRegister value, VirtualRegister iterable, VirtualRegister next, VirtualRegister iterator, unsigned stackOffset, unsigned __metadataID)
{
gen->setUsesCheckpoints();
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, done, value, iterable, next, iterator, stackOffset, __metadataID)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide32));
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(done));
gen->write(Fits<VirtualRegister, __size>::convert(value));
gen->write(Fits<VirtualRegister, __size>::convert(iterable));
gen->write(Fits<VirtualRegister, __size>::convert(next));
gen->write(Fits<VirtualRegister, __size>::convert(iterator));
gen->write(Fits<unsigned, __size>::convert(stackOffset));
gen->write(Fits<unsigned, __size>::convert(__metadataID));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**iterator_next"[2 - __sizeShiftAmount]);
dumper->dumpOperand("done", m_done, true);
dumper->dumpOperand("value", m_value, false);
dumper->dumpOperand("iterable", m_iterable, false);
dumper->dumpOperand("next", m_next, false);
dumper->dumpOperand("iterator", m_iterator, false);
dumper->dumpOperand("stackOffset", m_stackOffset, false);
}
OpIteratorNext(const uint8_t* stream)
: m_done(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_value(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[1]))
, m_iterable(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[2]))
, m_next(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[3]))
, m_iterator(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[4]))
, m_stackOffset(Fits<unsigned, OpcodeSize::Narrow>::convert(stream[5]))
, m_metadataID(Fits<unsigned, OpcodeSize::Narrow>::convert(stream[6]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpIteratorNext(const uint16_t* stream)
: m_done(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_value(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[1]))
, m_iterable(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[2]))
, m_next(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[3]))
, m_iterator(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[4]))
, m_stackOffset(Fits<unsigned, OpcodeSize::Wide16>::convert(stream[5]))
, m_metadataID(Fits<unsigned, OpcodeSize::Wide16>::convert(stream[6]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpIteratorNext(const uint32_t* stream)
: m_done(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_value(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[1]))
, m_iterable(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[2]))
, m_next(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[3]))
, m_iterator(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[4]))
, m_stackOffset(Fits<unsigned, OpcodeSize::Wide32>::convert(stream[5]))
, m_metadataID(Fits<unsigned, OpcodeSize::Wide32>::convert(stream[6]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static OpIteratorNext decode(const uint8_t* stream)
{
        // `stream` points at the instruction's first byte (the wide prefix when
        // present, otherwise the opcode); the constructor receives a pointer to
        // the first operand.
if (*stream == op_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == op_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
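    // Decoding sketch (assumes `instruction` points at the first byte of an
    // op_iterator_next in the instruction stream; the names below are
    // illustrative, not part of this header):
    //
    //     auto bytecode = OpIteratorNext::decode(instruction);
    //     VirtualRegister done = bytecode.m_done;
    //
    // The leading byte selects the operand width: an op_wide16 or op_wide32
    // prefix widens every operand to 16 or 32 bits; with no prefix each
    // operand occupies a single byte.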
template<typename Functor>
void setDone(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setDone<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setDone<OpcodeSize::Wide16>(value, func);
else
setDone<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setDone(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
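    // In each setter, the operand's byte offset is its index times the operand
    // width, plus a PaddingBySize term that appears to cover the wide prefix
    // byte when present, plus one byte for the opcode itself. If the new value
    // does not fit the instruction's current width, the Functor is invoked to
    // supply a fallback value that must fit.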
template<typename Functor>
void setValue(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setValue<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setValue<OpcodeSize::Wide16>(value, func);
else
setValue<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setValue(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setIterable(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setIterable<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setIterable<OpcodeSize::Wide16>(value, func);
else
setIterable<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setIterable(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 2 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setNext(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setNext<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setNext<OpcodeSize::Wide16>(value, func);
else
setNext<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setNext(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 3 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setIterator(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setIterator<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setIterator<OpcodeSize::Wide16>(value, func);
else
setIterator<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setIterator(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 4 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setStackOffset(unsigned value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setStackOffset<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setStackOffset<OpcodeSize::Wide16>(value, func);
else
setStackOffset<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setStackOffset(unsigned value, Functor func)
{
if (!Fits<unsigned, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 5 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<unsigned, size>::convert(value);
}
struct Metadata {
WTF_MAKE_NONCOPYABLE(Metadata);
public:
static constexpr OpcodeID opcodeID = op_iterator_next;
Metadata(const OpIteratorNext&) { }
IterationModeMetadata m_iterationMetadata;
ArrayProfile m_iterableProfile;
LLIntCallLinkInfo m_callLinkInfo;
ValueProfile m_nextResultProfile;
GetByIdModeMetadata m_doneModeMetadata;
ValueProfile m_doneProfile;
GetByIdModeMetadata m_valueModeMetadata;
ValueProfile m_valueProfile;
};
Metadata& metadata(CodeBlock* codeBlock) const
{
return codeBlock->metadata<Metadata>(opcodeID, m_metadataID);
}
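    // Metadata lives out of line in the CodeBlock, keyed by the opcode and the
    // per-instruction m_metadataID, so the instruction stream itself stays
    // compact while the mutable profiling state is kept elsewhere.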
VirtualRegister m_done;
VirtualRegister m_value;
VirtualRegister m_iterable;
VirtualRegister m_next;
VirtualRegister m_iterator;
unsigned m_stackOffset;
unsigned m_metadataID;
};
static_assert(OpIteratorNext::length > OpIteratorNext::numberOfCheckpoints, "FullBytecodeLiveness relies on the length of OpIteratorNext being greater than the number of checkpoints");
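// op_iterator_next is a checkpointed instruction: emitImpl calls
// gen->setUsesCheckpoints(), marking code that can exit and resume between
// sub-steps of a single bytecode. The assert above presumably exists because
// FullBytecodeLiveness indexes liveness by an offset within the instruction,
// which only works while the instruction is longer than its checkpoint count.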
struct OpGetByVal : public Instruction {
static constexpr OpcodeID opcodeID = op_get_by_val;
static constexpr size_t length = 5;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister base, VirtualRegister property)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, dst, base, property);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister base, VirtualRegister property)
{
auto __metadataID = gen->addMetadataFor(opcodeID);
return emit<__size, BytecodeGenerator, shouldAssert>(gen, dst, base, property, __metadataID);
}
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkWithoutMetadataID(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister base, VirtualRegister property)
{
decltype(gen->addMetadataFor(opcodeID)) __metadataID { };
return checkImpl<__size, BytecodeGenerator>(gen, dst, base, property, __metadataID);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister base, VirtualRegister property, unsigned __metadataID)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, dst, base, property, __metadataID);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister base, VirtualRegister property)
{
auto __metadataID = gen->addMetadataFor(opcodeID);
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, dst, base, property, __metadataID))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, dst, base, property, __metadataID))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, dst, base, property, __metadataID);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& dst, VirtualRegister& base, VirtualRegister& property, unsigned __metadataID)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<OpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(dst)
&& Fits<VirtualRegister, __size>::check(base)
&& Fits<VirtualRegister, __size>::check(property)
&& Fits<unsigned, __size>::check(__metadataID)
&& (__size == OpcodeSize::Wide16 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide32) : true);
}
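    // checkImpl verifies that the opcode, every operand, and the metadata ID
    // are all representable at the candidate width before anything is written;
    // the last two conjuncts additionally require the wide-prefix opcodes
    // themselves to fit in a single narrow byte.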
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister base, VirtualRegister property, unsigned __metadataID)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, dst, base, property, __metadataID)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide32));
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(dst));
gen->write(Fits<VirtualRegister, __size>::convert(base));
gen->write(Fits<VirtualRegister, __size>::convert(property));
gen->write(Fits<unsigned, __size>::convert(__metadataID));
return true;
}
return false;
}
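    // On success, emitImpl writes the optional wide prefix, the opcode byte,
    // then each operand at the chosen width, with the metadata ID last. The
    // alignment emitted up front is not undone on failure; the callers above
    // simply retry at the next wider encoding.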
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**get_by_val"[2 - __sizeShiftAmount]);
dumper->dumpOperand("dst", m_dst, true);
dumper->dumpOperand("base", m_base, false);
dumper->dumpOperand("property", m_property, false);
}
OpGetByVal(const uint8_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_base(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[1]))
, m_property(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[2]))
, m_metadataID(Fits<unsigned, OpcodeSize::Narrow>::convert(stream[3]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpGetByVal(const uint16_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_base(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[1]))
, m_property(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[2]))
, m_metadataID(Fits<unsigned, OpcodeSize::Wide16>::convert(stream[3]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpGetByVal(const uint32_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_base(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[1]))
, m_property(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[2]))
, m_metadataID(Fits<unsigned, OpcodeSize::Wide32>::convert(stream[3]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static OpGetByVal decode(const uint8_t* stream)
{
        // `stream` points at the instruction's first byte (the wide prefix when
        // present, otherwise the opcode); the constructor receives a pointer to
        // the first operand.
if (*stream == op_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == op_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setDst<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setDst<OpcodeSize::Wide16>(value, func);
else
setDst<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setBase(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setBase<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setBase<OpcodeSize::Wide16>(value, func);
else
setBase<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setBase(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setProperty(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setProperty<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setProperty<OpcodeSize::Wide16>(value, func);
else
setProperty<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setProperty(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 2 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
struct Metadata {
WTF_MAKE_NONCOPYABLE(Metadata);
public:
static constexpr OpcodeID opcodeID = op_get_by_val;
Metadata(const OpGetByVal&) { }
ValueProfile m_profile;
ArrayProfile m_arrayProfile;
GetByValHistory m_seenIdentifiers;
};
Metadata& metadata(CodeBlock* codeBlock) const
{
return codeBlock->metadata<Metadata>(opcodeID, m_metadataID);
}
VirtualRegister m_dst;
VirtualRegister m_base;
VirtualRegister m_property;
unsigned m_metadataID;
};
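// Usage sketch for the emit entry point (the generator variable is whatever
// BytecodeGenerator instantiates these templates; the names here are
// assumptions, not definitions from this header):
//
//     OpGetByVal::emit(&generator, dst, base, property);
//
// This picks the narrowest encoding whose operands all fit, and allocates the
// instruction's metadata entry via addMetadataFor(op_get_by_val) as a side
// effect.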
struct OpGetPrivateName : public Instruction {
static constexpr OpcodeID opcodeID = op_get_private_name;
static constexpr size_t length = 5;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister base, VirtualRegister property)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, dst, base, property);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister base, VirtualRegister property)
{
auto __metadataID = gen->addMetadataFor(opcodeID);
return emit<__size, BytecodeGenerator, shouldAssert>(gen, dst, base, property, __metadataID);
}
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkWithoutMetadataID(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister base, VirtualRegister property)
{
decltype(gen->addMetadataFor(opcodeID)) __metadataID { };
return checkImpl<__size, BytecodeGenerator>(gen, dst, base, property, __metadataID);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister base, VirtualRegister property, unsigned __metadataID)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, dst, base, property, __metadataID);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister base, VirtualRegister property)
{
auto __metadataID = gen->addMetadataFor(opcodeID);
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, dst, base, property, __metadataID))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, dst, base, property, __metadataID))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, dst, base, property, __metadataID);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& dst, VirtualRegister& base, VirtualRegister& property, unsigned __metadataID)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<OpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(dst)
&& Fits<VirtualRegister, __size>::check(base)
&& Fits<VirtualRegister, __size>::check(property)
&& Fits<unsigned, __size>::check(__metadataID)
&& (__size == OpcodeSize::Wide16 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister base, VirtualRegister property, unsigned __metadataID)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, dst, base, property, __metadataID)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide32));
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(dst));
gen->write(Fits<VirtualRegister, __size>::convert(base));
gen->write(Fits<VirtualRegister, __size>::convert(property));
gen->write(Fits<unsigned, __size>::convert(__metadataID));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**get_private_name"[2 - __sizeShiftAmount]);
dumper->dumpOperand("dst", m_dst, true);
dumper->dumpOperand("base", m_base, false);
dumper->dumpOperand("property", m_property, false);
}
OpGetPrivateName(const uint8_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_base(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[1]))
, m_property(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[2]))
, m_metadataID(Fits<unsigned, OpcodeSize::Narrow>::convert(stream[3]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpGetPrivateName(const uint16_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_base(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[1]))
, m_property(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[2]))
, m_metadataID(Fits<unsigned, OpcodeSize::Wide16>::convert(stream[3]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpGetPrivateName(const uint32_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_base(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[1]))
, m_property(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[2]))
, m_metadataID(Fits<unsigned, OpcodeSize::Wide32>::convert(stream[3]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static OpGetPrivateName decode(const uint8_t* stream)
{
        // `stream` points at the instruction's first byte (the wide prefix when
        // present, otherwise the opcode); the constructor receives a pointer to
        // the first operand.
if (*stream == op_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == op_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setDst<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setDst<OpcodeSize::Wide16>(value, func);
else
setDst<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setBase(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setBase<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setBase<OpcodeSize::Wide16>(value, func);
else
setBase<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setBase(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setProperty(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setProperty<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setProperty<OpcodeSize::Wide16>(value, func);
else
setProperty<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setProperty(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 2 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
struct Metadata {
WTF_MAKE_NONCOPYABLE(Metadata);
public:
static constexpr OpcodeID opcodeID = op_get_private_name;
Metadata(const OpGetPrivateName&) { }
ValueProfile m_profile;
StructureID m_structureID;
unsigned m_offset;
WriteBarrier<JSCell> m_property;
};
Metadata& metadata(CodeBlock* codeBlock) const
{
return codeBlock->metadata<Metadata>(opcodeID, m_metadataID);
}
VirtualRegister m_dst;
VirtualRegister m_base;
VirtualRegister m_property;
unsigned m_metadataID;
};
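// OpGetPrivateName's metadata pairs its ValueProfile with a StructureID,
// offset, and property cell, which reads like a one-entry inline cache for the
// private-name lookup; that is an inference from the field names, not
// something this generated header states.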
struct OpGetByIdDirect : public Instruction {
static constexpr OpcodeID opcodeID = op_get_by_id_direct;
static constexpr size_t length = 5;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister base, unsigned property)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, dst, base, property);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister base, unsigned property)
{
auto __metadataID = gen->addMetadataFor(opcodeID);
return emit<__size, BytecodeGenerator, shouldAssert>(gen, dst, base, property, __metadataID);
}
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkWithoutMetadataID(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister base, unsigned property)
{
decltype(gen->addMetadataFor(opcodeID)) __metadataID { };
return checkImpl<__size, BytecodeGenerator>(gen, dst, base, property, __metadataID);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister base, unsigned property, unsigned __metadataID)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, dst, base, property, __metadataID);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister base, unsigned property)
{
auto __metadataID = gen->addMetadataFor(opcodeID);
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, dst, base, property, __metadataID))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, dst, base, property, __metadataID))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, dst, base, property, __metadataID);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& dst, VirtualRegister& base, unsigned& property, unsigned __metadataID)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<OpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(dst)
&& Fits<VirtualRegister, __size>::check(base)
&& Fits<unsigned, __size>::check(property)
&& Fits<unsigned, __size>::check(__metadataID)
&& (__size == OpcodeSize::Wide16 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister base, unsigned property, unsigned __metadataID)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, dst, base, property, __metadataID)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide32));
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(dst));
gen->write(Fits<VirtualRegister, __size>::convert(base));
gen->write(Fits<unsigned, __size>::convert(property));
gen->write(Fits<unsigned, __size>::convert(__metadataID));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**get_by_id_direct"[2 - __sizeShiftAmount]);
dumper->dumpOperand("dst", m_dst, true);
dumper->dumpOperand("base", m_base, false);
dumper->dumpOperand("property", m_property, false);
}
OpGetByIdDirect(const uint8_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_base(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[1]))
, m_property(Fits<unsigned, OpcodeSize::Narrow>::convert(stream[2]))
, m_metadataID(Fits<unsigned, OpcodeSize::Narrow>::convert(stream[3]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpGetByIdDirect(const uint16_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_base(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[1]))
, m_property(Fits<unsigned, OpcodeSize::Wide16>::convert(stream[2]))
, m_metadataID(Fits<unsigned, OpcodeSize::Wide16>::convert(stream[3]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpGetByIdDirect(const uint32_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_base(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[1]))
, m_property(Fits<unsigned, OpcodeSize::Wide32>::convert(stream[2]))
, m_metadataID(Fits<unsigned, OpcodeSize::Wide32>::convert(stream[3]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static OpGetByIdDirect decode(const uint8_t* stream)
{
        // `stream` points at the instruction's first byte (the wide prefix when
        // present, otherwise the opcode); the constructor receives a pointer to
        // the first operand.
if (*stream == op_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == op_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setDst<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setDst<OpcodeSize::Wide16>(value, func);
else
setDst<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setBase(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setBase<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setBase<OpcodeSize::Wide16>(value, func);
else
setBase<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setBase(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setProperty(unsigned value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setProperty<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setProperty<OpcodeSize::Wide16>(value, func);
else
setProperty<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setProperty(unsigned value, Functor func)
{
if (!Fits<unsigned, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 2 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<unsigned, size>::convert(value);
}
struct Metadata {
WTF_MAKE_NONCOPYABLE(Metadata);
public:
static constexpr OpcodeID opcodeID = op_get_by_id_direct;
Metadata(const OpGetByIdDirect&) { }
ValueProfile m_profile;
StructureID m_structureID;
unsigned m_offset;
};
Metadata& metadata(CodeBlock* codeBlock) const
{
return codeBlock->metadata<Metadata>(opcodeID, m_metadataID);
}
VirtualRegister m_dst;
VirtualRegister m_base;
unsigned m_property;
unsigned m_metadataID;
};
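// Note the operand types: unlike op_get_by_val, whose property is a
// VirtualRegister, op_get_by_id_direct encodes property as an unsigned,
// presumably an index identifying the property name rather than a register.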
struct OpPutPrivateName : public Instruction {
static constexpr OpcodeID opcodeID = op_put_private_name;
static constexpr size_t length = 6;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister base, VirtualRegister property, VirtualRegister value, PrivateFieldPutKind putKind)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, base, property, value, putKind);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert>
static bool emit(BytecodeGenerator* gen, VirtualRegister base, VirtualRegister property, VirtualRegister value, PrivateFieldPutKind putKind)
{
auto __metadataID = gen->addMetadataFor(opcodeID);
return emit<__size, BytecodeGenerator, shouldAssert>(gen, base, property, value, putKind, __metadataID);
}
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkWithoutMetadataID(BytecodeGenerator* gen, VirtualRegister base, VirtualRegister property, VirtualRegister value, PrivateFieldPutKind putKind)
{
decltype(gen->addMetadataFor(opcodeID)) __metadataID { };
return checkImpl<__size, BytecodeGenerator>(gen, base, property, value, putKind, __metadataID);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister base, VirtualRegister property, VirtualRegister value, PrivateFieldPutKind putKind, unsigned __metadataID)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, base, property, value, putKind, __metadataID);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister base, VirtualRegister property, VirtualRegister value, PrivateFieldPutKind putKind)
{
auto __metadataID = gen->addMetadataFor(opcodeID);
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, base, property, value, putKind, __metadataID))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, base, property, value, putKind, __metadataID))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, base, property, value, putKind, __metadataID);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& base, VirtualRegister& property, VirtualRegister& value, PrivateFieldPutKind& putKind, unsigned __metadataID)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<OpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(base)
&& Fits<VirtualRegister, __size>::check(property)
&& Fits<VirtualRegister, __size>::check(value)
&& Fits<PrivateFieldPutKind, __size>::check(putKind)
&& Fits<unsigned, __size>::check(__metadataID)
&& (__size == OpcodeSize::Wide16 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister base, VirtualRegister property, VirtualRegister value, PrivateFieldPutKind putKind, unsigned __metadataID)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, base, property, value, putKind, __metadataID)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide32));
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(base));
gen->write(Fits<VirtualRegister, __size>::convert(property));
gen->write(Fits<VirtualRegister, __size>::convert(value));
gen->write(Fits<PrivateFieldPutKind, __size>::convert(putKind));
gen->write(Fits<unsigned, __size>::convert(__metadataID));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**put_private_name"[2 - __sizeShiftAmount]);
dumper->dumpOperand("base", m_base, true);
dumper->dumpOperand("property", m_property, false);
dumper->dumpOperand("value", m_value, false);
dumper->dumpOperand("putKind", m_putKind, false);
}
OpPutPrivateName(const uint8_t* stream)
: m_base(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_property(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[1]))
, m_value(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[2]))
, m_putKind(Fits<PrivateFieldPutKind, OpcodeSize::Narrow>::convert(stream[3]))
, m_metadataID(Fits<unsigned, OpcodeSize::Narrow>::convert(stream[4]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpPutPrivateName(const uint16_t* stream)
: m_base(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_property(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[1]))
, m_value(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[2]))
, m_putKind(Fits<PrivateFieldPutKind, OpcodeSize::Wide16>::convert(stream[3]))
, m_metadataID(Fits<unsigned, OpcodeSize::Wide16>::convert(stream[4]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpPutPrivateName(const uint32_t* stream)
: m_base(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_property(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[1]))
, m_value(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[2]))
, m_putKind(Fits<PrivateFieldPutKind, OpcodeSize::Wide32>::convert(stream[3]))
, m_metadataID(Fits<unsigned, OpcodeSize::Wide32>::convert(stream[4]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static OpPutPrivateName decode(const uint8_t* stream)
{
        // `stream` points at the instruction's first byte (the wide prefix when
        // present, otherwise the opcode); the constructor receives a pointer to
        // the first operand.
if (*stream == op_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == op_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setBase(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setBase<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setBase<OpcodeSize::Wide16>(value, func);
else
setBase<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setBase(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setProperty(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setProperty<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setProperty<OpcodeSize::Wide16>(value, func);
else
setProperty<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setProperty(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setValue(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setValue<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setValue<OpcodeSize::Wide16>(value, func);
else
setValue<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setValue(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 2 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setPutKind(PrivateFieldPutKind value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setPutKind<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setPutKind<OpcodeSize::Wide16>(value, func);
else
setPutKind<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setPutKind(PrivateFieldPutKind value, Functor func)
{
if (!Fits<PrivateFieldPutKind, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 3 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<PrivateFieldPutKind, size>::convert(value);
}
struct Metadata {
WTF_MAKE_NONCOPYABLE(Metadata);
public:
static constexpr OpcodeID opcodeID = op_put_private_name;
Metadata(const OpPutPrivateName&) { }
StructureID m_oldStructureID;
WriteBarrier<JSCell> m_property;
unsigned m_offset;
StructureID m_newStructureID;
};
Metadata& metadata(CodeBlock* codeBlock) const
{
return codeBlock->metadata<Metadata>(opcodeID, m_metadataID);
}
VirtualRegister m_base;
VirtualRegister m_property;
VirtualRegister m_value;
PrivateFieldPutKind m_putKind;
unsigned m_metadataID;
};
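// The old/new StructureID pair and offset in OpPutPrivateName's metadata
// suggest a cached put that either replaces in place or performs a structure
// transition when the field is first defined. PrivateFieldPutKind travels as a
// regular operand, so the put's kind is fixed at bytecode-generation time.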
struct OpAdd : public Instruction {
static constexpr OpcodeID opcodeID = op_add;
static constexpr size_t length = 6;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs, OperandTypes operandTypes)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, dst, lhs, rhs, operandTypes);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs, OperandTypes operandTypes)
{
auto __metadataID = gen->addMetadataFor(opcodeID);
return emit<__size, BytecodeGenerator, shouldAssert>(gen, dst, lhs, rhs, operandTypes, __metadataID);
}
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkWithoutMetadataID(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs, OperandTypes operandTypes)
{
decltype(gen->addMetadataFor(opcodeID)) __metadataID { };
return checkImpl<__size, BytecodeGenerator>(gen, dst, lhs, rhs, operandTypes, __metadataID);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs, OperandTypes operandTypes, unsigned __metadataID)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, dst, lhs, rhs, operandTypes, __metadataID);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs, OperandTypes operandTypes)
{
auto __metadataID = gen->addMetadataFor(opcodeID);
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, dst, lhs, rhs, operandTypes, __metadataID))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, dst, lhs, rhs, operandTypes, __metadataID))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, dst, lhs, rhs, operandTypes, __metadataID);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& dst, VirtualRegister& lhs, VirtualRegister& rhs, OperandTypes& operandTypes, unsigned __metadataID)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<OpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(dst)
&& Fits<VirtualRegister, __size>::check(lhs)
&& Fits<VirtualRegister, __size>::check(rhs)
&& Fits<OperandTypes, __size>::check(operandTypes)
&& Fits<unsigned, __size>::check(__metadataID)
&& (__size == OpcodeSize::Wide16 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs, OperandTypes operandTypes, unsigned __metadataID)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, dst, lhs, rhs, operandTypes, __metadataID)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide32));
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(dst));
gen->write(Fits<VirtualRegister, __size>::convert(lhs));
gen->write(Fits<VirtualRegister, __size>::convert(rhs));
gen->write(Fits<OperandTypes, __size>::convert(operandTypes));
gen->write(Fits<unsigned, __size>::convert(__metadataID));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**add"[2 - __sizeShiftAmount]);
dumper->dumpOperand("dst", m_dst, true);
dumper->dumpOperand("lhs", m_lhs, false);
dumper->dumpOperand("rhs", m_rhs, false);
dumper->dumpOperand("operandTypes", m_operandTypes, false);
}
OpAdd(const uint8_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_lhs(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[1]))
, m_rhs(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[2]))
, m_operandTypes(Fits<OperandTypes, OpcodeSize::Narrow>::convert(stream[3]))
, m_metadataID(Fits<unsigned, OpcodeSize::Narrow>::convert(stream[4]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpAdd(const uint16_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_lhs(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[1]))
, m_rhs(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[2]))
, m_operandTypes(Fits<OperandTypes, OpcodeSize::Wide16>::convert(stream[3]))
, m_metadataID(Fits<unsigned, OpcodeSize::Wide16>::convert(stream[4]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpAdd(const uint32_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_lhs(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[1]))
, m_rhs(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[2]))
, m_operandTypes(Fits<OperandTypes, OpcodeSize::Wide32>::convert(stream[3]))
, m_metadataID(Fits<unsigned, OpcodeSize::Wide32>::convert(stream[4]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static OpAdd decode(const uint8_t* stream)
{
        // `stream` points at the instruction's first byte (the wide prefix when
        // present, otherwise the opcode); the constructor receives a pointer to
        // the first operand.
if (*stream == op_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == op_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setDst<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setDst<OpcodeSize::Wide16>(value, func);
else
setDst<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setLhs(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setLhs<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setLhs<OpcodeSize::Wide16>(value, func);
else
setLhs<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setLhs(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setRhs(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setRhs<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setRhs<OpcodeSize::Wide16>(value, func);
else
setRhs<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setRhs(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 2 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setOperandTypes(OperandTypes value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setOperandTypes<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setOperandTypes<OpcodeSize::Wide16>(value, func);
else
setOperandTypes<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setOperandTypes(OperandTypes value, Functor func)
{
if (!Fits<OperandTypes, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 3 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<OperandTypes, size>::convert(value);
}
struct Metadata {
WTF_MAKE_NONCOPYABLE(Metadata);
public:
static constexpr OpcodeID opcodeID = op_add;
Metadata(const OpAdd&) { }
BinaryArithProfile m_arithProfile;
};
Metadata& metadata(CodeBlock* codeBlock) const
{
return codeBlock->metadata<Metadata>(opcodeID, m_metadataID);
}
VirtualRegister m_dst;
VirtualRegister m_lhs;
VirtualRegister m_rhs;
OperandTypes m_operandTypes;
unsigned m_metadataID;
};
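// Setter sketch: patching an operand after emission must respect the
// instruction's existing width, so each setter takes a Functor that produces a
// fallback when the new value does not fit (relocateToNarrowRegister is a
// hypothetical helper, not part of this header):
//
//     bytecode.setLhs(newRegister, [&]() -> VirtualRegister {
//         return relocateToNarrowRegister(newRegister); // assumed fallback
//     });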
struct OpGetArgument : public Instruction {
static constexpr OpcodeID opcodeID = op_get_argument;
static constexpr size_t length = 4;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister dst, int index)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, dst, index);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, int index)
{
auto __metadataID = gen->addMetadataFor(opcodeID);
return emit<__size, BytecodeGenerator, shouldAssert>(gen, dst, index, __metadataID);
}
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkWithoutMetadataID(BytecodeGenerator* gen, VirtualRegister dst, int index)
{
decltype(gen->addMetadataFor(opcodeID)) __metadataID { };
return checkImpl<__size, BytecodeGenerator>(gen, dst, index, __metadataID);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, int index, unsigned __metadataID)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, dst, index, __metadataID);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister dst, int index)
{
auto __metadataID = gen->addMetadataFor(opcodeID);
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, dst, index, __metadataID))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, dst, index, __metadataID))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, dst, index, __metadataID);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& dst, int& index, unsigned __metadataID)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<OpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(dst)
&& Fits<int, __size>::check(index)
&& Fits<unsigned, __size>::check(__metadataID)
&& (__size == OpcodeSize::Wide16 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister dst, int index, unsigned __metadataID)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, dst, index, __metadataID)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide32));
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(dst));
gen->write(Fits<int, __size>::convert(index));
gen->write(Fits<unsigned, __size>::convert(__metadataID));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**get_argument"[2 - __sizeShiftAmount]);
dumper->dumpOperand("dst", m_dst, true);
dumper->dumpOperand("index", m_index, false);
}
OpGetArgument(const uint8_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_index(Fits<int, OpcodeSize::Narrow>::convert(stream[1]))
, m_metadataID(Fits<unsigned, OpcodeSize::Narrow>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpGetArgument(const uint16_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_index(Fits<int, OpcodeSize::Wide16>::convert(stream[1]))
, m_metadataID(Fits<unsigned, OpcodeSize::Wide16>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpGetArgument(const uint32_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_index(Fits<int, OpcodeSize::Wide32>::convert(stream[1]))
, m_metadataID(Fits<unsigned, OpcodeSize::Wide32>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static OpGetArgument decode(const uint8_t* stream)
{
// `stream` points at the start of the instruction; the constructor receives a
// pointer to the first operand, past the opcode and any wide-size prefix.
if (*stream == op_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == op_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setDst<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setDst<OpcodeSize::Wide16>(value, func);
else
setDst<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setIndex(int value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setIndex<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setIndex<OpcodeSize::Wide16>(value, func);
else
setIndex<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setIndex(int value, Functor func)
{
if (!Fits<int, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<int, size>::convert(value);
}
struct Metadata {
WTF_MAKE_NONCOPYABLE(Metadata);
public:
static constexpr OpcodeID opcodeID = op_get_argument;
Metadata(const OpGetArgument&) { }
ValueProfile m_profile;
};
Metadata& metadata(CodeBlock* codeBlock) const
{
return codeBlock->metadata<Metadata>(opcodeID, m_metadataID);
}
VirtualRegister m_dst;
int m_index;
unsigned m_metadataID;
};
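// Illustrative sketch, not emitted by the generator: every Op* struct in this
// file follows the same protocol. emit() first tries the Narrow encoding (one
// byte per operand); whenever Fits<T, Narrow>::check fails for any operand it
// falls back to Wide16 and then Wide32, writing an op_wide16/op_wide32 prefix
// byte (after aligning the stream) ahead of the opcode itself. A hypothetical
// caller might look like this; `exampleEmitGetArgument` is not part of JSC.
template<typename BytecodeGenerator>
void exampleEmitGetArgument(BytecodeGenerator* gen)
{
    // Load argument 2 into a destination register. The emitter allocates the
    // metadata slot, checks operand widths, and writes the smallest encoding
    // into which every operand (and the metadata ID) fits.
    OpGetArgument::emit(gen, VirtualRegister(1), /* index */ 2);
}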
struct OpTryGetById : public Instruction {
static constexpr OpcodeID opcodeID = op_try_get_by_id;
static constexpr size_t length = 5;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister base, unsigned property)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, dst, base, property);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister base, unsigned property)
{
auto __metadataID = gen->addMetadataFor(opcodeID);
return emit<__size, BytecodeGenerator, shouldAssert>(gen, dst, base, property, __metadataID);
}
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkWithoutMetadataID(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister base, unsigned property)
{
decltype(gen->addMetadataFor(opcodeID)) __metadataID { };
return checkImpl<__size, BytecodeGenerator>(gen, dst, base, property, __metadataID);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister base, unsigned property, unsigned __metadataID)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, dst, base, property, __metadataID);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister base, unsigned property)
{
auto __metadataID = gen->addMetadataFor(opcodeID);
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, dst, base, property, __metadataID))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, dst, base, property, __metadataID))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, dst, base, property, __metadataID);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& dst, VirtualRegister& base, unsigned& property, unsigned __metadataID)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<OpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(dst)
&& Fits<VirtualRegister, __size>::check(base)
&& Fits<unsigned, __size>::check(property)
&& Fits<unsigned, __size>::check(__metadataID)
&& (__size == OpcodeSize::Wide16 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister base, unsigned property, unsigned __metadataID)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, dst, base, property, __metadataID)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide32));
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(dst));
gen->write(Fits<VirtualRegister, __size>::convert(base));
gen->write(Fits<unsigned, __size>::convert(property));
gen->write(Fits<unsigned, __size>::convert(__metadataID));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**try_get_by_id"[2 - __sizeShiftAmount]);
dumper->dumpOperand("dst", m_dst, true);
dumper->dumpOperand("base", m_base, false);
dumper->dumpOperand("property", m_property, false);
}
OpTryGetById(const uint8_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_base(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[1]))
, m_property(Fits<unsigned, OpcodeSize::Narrow>::convert(stream[2]))
, m_metadataID(Fits<unsigned, OpcodeSize::Narrow>::convert(stream[3]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpTryGetById(const uint16_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_base(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[1]))
, m_property(Fits<unsigned, OpcodeSize::Wide16>::convert(stream[2]))
, m_metadataID(Fits<unsigned, OpcodeSize::Wide16>::convert(stream[3]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpTryGetById(const uint32_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_base(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[1]))
, m_property(Fits<unsigned, OpcodeSize::Wide32>::convert(stream[2]))
, m_metadataID(Fits<unsigned, OpcodeSize::Wide32>::convert(stream[3]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static OpTryGetById decode(const uint8_t* stream)
{
// `stream` points at the start of the instruction; the constructor receives a
// pointer to the first operand, past the opcode and any wide-size prefix.
if (*stream == op_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == op_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setDst<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setDst<OpcodeSize::Wide16>(value, func);
else
setDst<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setBase(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setBase<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setBase<OpcodeSize::Wide16>(value, func);
else
setBase<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setBase(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setProperty(unsigned value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setProperty<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setProperty<OpcodeSize::Wide16>(value, func);
else
setProperty<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setProperty(unsigned value, Functor func)
{
if (!Fits<unsigned, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 2 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<unsigned, size>::convert(value);
}
struct Metadata {
WTF_MAKE_NONCOPYABLE(Metadata);
public:
static constexpr OpcodeID opcodeID = op_try_get_by_id;
Metadata(const OpTryGetById&) { }
ValueProfile m_profile;
};
Metadata& metadata(CodeBlock* codeBlock) const
{
return codeBlock->metadata<Metadata>(opcodeID, m_metadataID);
}
VirtualRegister m_dst;
VirtualRegister m_base;
unsigned m_property;
unsigned m_metadataID;
};
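// Illustrative sketch, not emitted by the generator: decode() is the inverse
// of emit(). It inspects the first byte of the instruction; an op_wide16 or
// op_wide32 prefix selects the 16- or 32-bit operand constructor, otherwise
// the narrow one is used. `examplePropertyOf` is a hypothetical helper.
inline unsigned examplePropertyOf(const uint8_t* bytes)
{
    // `bytes` must point at the start of the instruction, prefix included.
    // The decoded struct is a plain value whose operands have been widened
    // back to their canonical types (VirtualRegister, unsigned, ...).
    return OpTryGetById::decode(bytes).m_property;
}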
struct OpCall : public Instruction {
static constexpr OpcodeID opcodeID = op_call;
static constexpr size_t length = 6;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister callee, unsigned argc, unsigned argv)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, dst, callee, argc, argv);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister callee, unsigned argc, unsigned argv)
{
auto __metadataID = gen->addMetadataFor(opcodeID);
return emit<__size, BytecodeGenerator, shouldAssert>(gen, dst, callee, argc, argv, __metadataID);
}
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkWithoutMetadataID(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister callee, unsigned argc, unsigned argv)
{
decltype(gen->addMetadataFor(opcodeID)) __metadataID { };
return checkImpl<__size, BytecodeGenerator>(gen, dst, callee, argc, argv, __metadataID);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister callee, unsigned argc, unsigned argv, unsigned __metadataID)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, dst, callee, argc, argv, __metadataID);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister callee, unsigned argc, unsigned argv)
{
auto __metadataID = gen->addMetadataFor(opcodeID);
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, dst, callee, argc, argv, __metadataID))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, dst, callee, argc, argv, __metadataID))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, dst, callee, argc, argv, __metadataID);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& dst, VirtualRegister& callee, unsigned& argc, unsigned& argv, unsigned __metadataID)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<OpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(dst)
&& Fits<VirtualRegister, __size>::check(callee)
&& Fits<unsigned, __size>::check(argc)
&& Fits<unsigned, __size>::check(argv)
&& Fits<unsigned, __size>::check(__metadataID)
&& (__size == OpcodeSize::Wide16 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister callee, unsigned argc, unsigned argv, unsigned __metadataID)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, dst, callee, argc, argv, __metadataID)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide32));
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(dst));
gen->write(Fits<VirtualRegister, __size>::convert(callee));
gen->write(Fits<unsigned, __size>::convert(argc));
gen->write(Fits<unsigned, __size>::convert(argv));
gen->write(Fits<unsigned, __size>::convert(__metadataID));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**call"[2 - __sizeShiftAmount]);
dumper->dumpOperand("dst", m_dst, true);
dumper->dumpOperand("callee", m_callee, false);
dumper->dumpOperand("argc", m_argc, false);
dumper->dumpOperand("argv", m_argv, false);
}
OpCall(const uint8_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_callee(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[1]))
, m_argc(Fits<unsigned, OpcodeSize::Narrow>::convert(stream[2]))
, m_argv(Fits<unsigned, OpcodeSize::Narrow>::convert(stream[3]))
, m_metadataID(Fits<unsigned, OpcodeSize::Narrow>::convert(stream[4]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpCall(const uint16_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_callee(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[1]))
, m_argc(Fits<unsigned, OpcodeSize::Wide16>::convert(stream[2]))
, m_argv(Fits<unsigned, OpcodeSize::Wide16>::convert(stream[3]))
, m_metadataID(Fits<unsigned, OpcodeSize::Wide16>::convert(stream[4]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpCall(const uint32_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_callee(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[1]))
, m_argc(Fits<unsigned, OpcodeSize::Wide32>::convert(stream[2]))
, m_argv(Fits<unsigned, OpcodeSize::Wide32>::convert(stream[3]))
, m_metadataID(Fits<unsigned, OpcodeSize::Wide32>::convert(stream[4]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static OpCall decode(const uint8_t* stream)
{
// `stream` points at the start of the instruction; the constructor receives a
// pointer to the first operand, past the opcode and any wide-size prefix.
if (*stream == op_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == op_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setDst<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setDst<OpcodeSize::Wide16>(value, func);
else
setDst<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setCallee(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setCallee<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setCallee<OpcodeSize::Wide16>(value, func);
else
setCallee<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setCallee(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setArgc(unsigned value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setArgc<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setArgc<OpcodeSize::Wide16>(value, func);
else
setArgc<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setArgc(unsigned value, Functor func)
{
if (!Fits<unsigned, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 2 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<unsigned, size>::convert(value);
}
template<typename Functor>
void setArgv(unsigned value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setArgv<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setArgv<OpcodeSize::Wide16>(value, func);
else
setArgv<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setArgv(unsigned value, Functor func)
{
if (!Fits<unsigned, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 3 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<unsigned, size>::convert(value);
}
struct Metadata {
WTF_MAKE_NONCOPYABLE(Metadata);
public:
static constexpr OpcodeID opcodeID = op_call;
Metadata(const OpCall&) { }
LLIntCallLinkInfo m_callLinkInfo;
ValueProfile m_profile;
};
Metadata& metadata(CodeBlock* codeBlock) const
{
return codeBlock->metadata<Metadata>(opcodeID, m_metadataID);
}
VirtualRegister m_dst;
VirtualRegister m_callee;
unsigned m_argc;
unsigned m_argv;
unsigned m_metadataID;
};
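// Illustrative sketch, not emitted by the generator: the set* members patch a
// single operand of an already-emitted instruction in place, dispatching on
// the instruction's current width. The functor runs only when the new value
// does not fit that width and must return a substitute value that does.
// `exampleRetargetCallDst` is a hypothetical helper.
inline void exampleRetargetCallDst(OpCall& op, VirtualRegister newDst)
{
    op.setDst(newDst, []() -> VirtualRegister {
        // A real client (e.g. a bytecode rewriter) would re-emit the
        // instruction at a wider size here; this sketch just gives up.
        RELEASE_ASSERT_NOT_REACHED();
        return VirtualRegister();
    });
}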
struct OpTailCall : public Instruction {
static constexpr OpcodeID opcodeID = op_tail_call;
static constexpr size_t length = 6;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister callee, unsigned argc, unsigned argv)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, dst, callee, argc, argv);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister callee, unsigned argc, unsigned argv)
{
auto __metadataID = gen->addMetadataFor(opcodeID);
return emit<__size, BytecodeGenerator, shouldAssert>(gen, dst, callee, argc, argv, __metadataID);
}
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkWithoutMetadataID(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister callee, unsigned argc, unsigned argv)
{
decltype(gen->addMetadataFor(opcodeID)) __metadataID { };
return checkImpl<__size, BytecodeGenerator>(gen, dst, callee, argc, argv, __metadataID);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister callee, unsigned argc, unsigned argv, unsigned __metadataID)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, dst, callee, argc, argv, __metadataID);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister callee, unsigned argc, unsigned argv)
{
auto __metadataID = gen->addMetadataFor(opcodeID);
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, dst, callee, argc, argv, __metadataID))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, dst, callee, argc, argv, __metadataID))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, dst, callee, argc, argv, __metadataID);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& dst, VirtualRegister& callee, unsigned& argc, unsigned& argv, unsigned __metadataID)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<OpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(dst)
&& Fits<VirtualRegister, __size>::check(callee)
&& Fits<unsigned, __size>::check(argc)
&& Fits<unsigned, __size>::check(argv)
&& Fits<unsigned, __size>::check(__metadataID)
&& (__size == OpcodeSize::Wide16 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister callee, unsigned argc, unsigned argv, unsigned __metadataID)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, dst, callee, argc, argv, __metadataID)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide32));
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(dst));
gen->write(Fits<VirtualRegister, __size>::convert(callee));
gen->write(Fits<unsigned, __size>::convert(argc));
gen->write(Fits<unsigned, __size>::convert(argv));
gen->write(Fits<unsigned, __size>::convert(__metadataID));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**tail_call"[2 - __sizeShiftAmount]);
dumper->dumpOperand("dst", m_dst, true);
dumper->dumpOperand("callee", m_callee, false);
dumper->dumpOperand("argc", m_argc, false);
dumper->dumpOperand("argv", m_argv, false);
}
OpTailCall(const uint8_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_callee(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[1]))
, m_argc(Fits<unsigned, OpcodeSize::Narrow>::convert(stream[2]))
, m_argv(Fits<unsigned, OpcodeSize::Narrow>::convert(stream[3]))
, m_metadataID(Fits<unsigned, OpcodeSize::Narrow>::convert(stream[4]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpTailCall(const uint16_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_callee(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[1]))
, m_argc(Fits<unsigned, OpcodeSize::Wide16>::convert(stream[2]))
, m_argv(Fits<unsigned, OpcodeSize::Wide16>::convert(stream[3]))
, m_metadataID(Fits<unsigned, OpcodeSize::Wide16>::convert(stream[4]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpTailCall(const uint32_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_callee(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[1]))
, m_argc(Fits<unsigned, OpcodeSize::Wide32>::convert(stream[2]))
, m_argv(Fits<unsigned, OpcodeSize::Wide32>::convert(stream[3]))
, m_metadataID(Fits<unsigned, OpcodeSize::Wide32>::convert(stream[4]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static OpTailCall decode(const uint8_t* stream)
{
// `stream` points at the start of the instruction; the constructor receives a
// pointer to the first operand, past the opcode and any wide-size prefix.
if (*stream == op_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == op_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setDst<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setDst<OpcodeSize::Wide16>(value, func);
else
setDst<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setCallee(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setCallee<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setCallee<OpcodeSize::Wide16>(value, func);
else
setCallee<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setCallee(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setArgc(unsigned value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setArgc<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setArgc<OpcodeSize::Wide16>(value, func);
else
setArgc<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setArgc(unsigned value, Functor func)
{
if (!Fits<unsigned, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 2 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<unsigned, size>::convert(value);
}
template<typename Functor>
void setArgv(unsigned value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setArgv<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setArgv<OpcodeSize::Wide16>(value, func);
else
setArgv<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setArgv(unsigned value, Functor func)
{
if (!Fits<unsigned, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 3 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<unsigned, size>::convert(value);
}
struct Metadata {
WTF_MAKE_NONCOPYABLE(Metadata);
public:
static constexpr OpcodeID opcodeID = op_tail_call;
Metadata(const OpTailCall&) { }
LLIntCallLinkInfo m_callLinkInfo;
ValueProfile m_profile;
};
Metadata& metadata(CodeBlock* codeBlock) const
{
return codeBlock->metadata<Metadata>(opcodeID, m_metadataID);
}
VirtualRegister m_dst;
VirtualRegister m_callee;
unsigned m_argc;
unsigned m_argv;
unsigned m_metadataID;
};
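// Illustrative sketch, not emitted by the generator: checkWithoutMetadataID
// answers "would these operands fit a given encoding?" without allocating a
// metadata slot or writing to the stream; emit() performs the same check and
// then writes on success. `exampleFitsNarrow` is a hypothetical helper.
template<typename BytecodeGenerator>
bool exampleFitsNarrow(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister callee, unsigned argc, unsigned argv)
{
    return OpTailCall::checkWithoutMetadataID<OpcodeSize::Narrow>(gen, dst, callee, argc, argv);
}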
struct OpCallEval : public Instruction {
static constexpr OpcodeID opcodeID = op_call_eval;
static constexpr size_t length = 7;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister callee, unsigned argc, unsigned argv, ECMAMode ecmaMode)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, dst, callee, argc, argv, ecmaMode);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister callee, unsigned argc, unsigned argv, ECMAMode ecmaMode)
{
auto __metadataID = gen->addMetadataFor(opcodeID);
return emit<__size, BytecodeGenerator, shouldAssert>(gen, dst, callee, argc, argv, ecmaMode, __metadataID);
}
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkWithoutMetadataID(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister callee, unsigned argc, unsigned argv, ECMAMode ecmaMode)
{
decltype(gen->addMetadataFor(opcodeID)) __metadataID { };
return checkImpl<__size, BytecodeGenerator>(gen, dst, callee, argc, argv, ecmaMode, __metadataID);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister callee, unsigned argc, unsigned argv, ECMAMode ecmaMode, unsigned __metadataID)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, dst, callee, argc, argv, ecmaMode, __metadataID);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister callee, unsigned argc, unsigned argv, ECMAMode ecmaMode)
{
auto __metadataID = gen->addMetadataFor(opcodeID);
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, dst, callee, argc, argv, ecmaMode, __metadataID))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, dst, callee, argc, argv, ecmaMode, __metadataID))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, dst, callee, argc, argv, ecmaMode, __metadataID);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& dst, VirtualRegister& callee, unsigned& argc, unsigned& argv, ECMAMode& ecmaMode, unsigned __metadataID)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<OpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(dst)
&& Fits<VirtualRegister, __size>::check(callee)
&& Fits<unsigned, __size>::check(argc)
&& Fits<unsigned, __size>::check(argv)
&& Fits<ECMAMode, __size>::check(ecmaMode)
&& Fits<unsigned, __size>::check(__metadataID)
&& (__size == OpcodeSize::Wide16 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister callee, unsigned argc, unsigned argv, ECMAMode ecmaMode, unsigned __metadataID)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, dst, callee, argc, argv, ecmaMode, __metadataID)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide32));
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(dst));
gen->write(Fits<VirtualRegister, __size>::convert(callee));
gen->write(Fits<unsigned, __size>::convert(argc));
gen->write(Fits<unsigned, __size>::convert(argv));
gen->write(Fits<ECMAMode, __size>::convert(ecmaMode));
gen->write(Fits<unsigned, __size>::convert(__metadataID));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**call_eval"[2 - __sizeShiftAmount]);
dumper->dumpOperand("dst", m_dst, true);
dumper->dumpOperand("callee", m_callee, false);
dumper->dumpOperand("argc", m_argc, false);
dumper->dumpOperand("argv", m_argv, false);
dumper->dumpOperand("ecmaMode", m_ecmaMode, false);
}
OpCallEval(const uint8_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_callee(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[1]))
, m_argc(Fits<unsigned, OpcodeSize::Narrow>::convert(stream[2]))
, m_argv(Fits<unsigned, OpcodeSize::Narrow>::convert(stream[3]))
, m_ecmaMode(Fits<ECMAMode, OpcodeSize::Narrow>::convert(stream[4]))
, m_metadataID(Fits<unsigned, OpcodeSize::Narrow>::convert(stream[5]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpCallEval(const uint16_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_callee(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[1]))
, m_argc(Fits<unsigned, OpcodeSize::Wide16>::convert(stream[2]))
, m_argv(Fits<unsigned, OpcodeSize::Wide16>::convert(stream[3]))
, m_ecmaMode(Fits<ECMAMode, OpcodeSize::Wide16>::convert(stream[4]))
, m_metadataID(Fits<unsigned, OpcodeSize::Wide16>::convert(stream[5]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpCallEval(const uint32_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_callee(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[1]))
, m_argc(Fits<unsigned, OpcodeSize::Wide32>::convert(stream[2]))
, m_argv(Fits<unsigned, OpcodeSize::Wide32>::convert(stream[3]))
, m_ecmaMode(Fits<ECMAMode, OpcodeSize::Wide32>::convert(stream[4]))
, m_metadataID(Fits<unsigned, OpcodeSize::Wide32>::convert(stream[5]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static OpCallEval decode(const uint8_t* stream)
{
// `stream` points at the start of the instruction; the constructor receives a
// pointer to the first operand, past the opcode and any wide-size prefix.
if (*stream == op_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == op_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setDst<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setDst<OpcodeSize::Wide16>(value, func);
else
setDst<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setCallee(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setCallee<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setCallee<OpcodeSize::Wide16>(value, func);
else
setCallee<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setCallee(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setArgc(unsigned value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setArgc<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setArgc<OpcodeSize::Wide16>(value, func);
else
setArgc<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setArgc(unsigned value, Functor func)
{
if (!Fits<unsigned, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 2 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<unsigned, size>::convert(value);
}
template<typename Functor>
void setArgv(unsigned value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setArgv<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setArgv<OpcodeSize::Wide16>(value, func);
else
setArgv<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setArgv(unsigned value, Functor func)
{
if (!Fits<unsigned, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 3 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<unsigned, size>::convert(value);
}
template<typename Functor>
void setEcmaMode(ECMAMode value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setEcmaMode<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setEcmaMode<OpcodeSize::Wide16>(value, func);
else
setEcmaMode<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setEcmaMode(ECMAMode value, Functor func)
{
if (!Fits<ECMAMode, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 4 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<ECMAMode, size>::convert(value);
}
struct Metadata {
WTF_MAKE_NONCOPYABLE(Metadata);
public:
static constexpr OpcodeID opcodeID = op_call_eval;
Metadata(const OpCallEval&) { }
LLIntCallLinkInfo m_callLinkInfo;
ValueProfile m_profile;
};
Metadata& metadata(CodeBlock* codeBlock) const
{
return codeBlock->metadata<Metadata>(opcodeID, m_metadataID);
}
VirtualRegister m_dst;
VirtualRegister m_callee;
unsigned m_argc;
unsigned m_argv;
ECMAMode m_ecmaMode;
unsigned m_metadataID;
};
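// Illustrative sketch, not emitted by the generator: mutable per-instruction
// state (value profiles, call link info) lives in the CodeBlock's metadata
// table, keyed by (opcodeID, m_metadataID), so the bytecode stream itself
// stays compact and read-only. `exampleProfileFor` is a hypothetical helper;
// it is a template only so that CodeBlock need not be complete at this point.
template<typename Op>
ValueProfile& exampleProfileFor(const Op& op, CodeBlock* codeBlock)
{
    // metadata() indexes the CodeBlock's metadata table for this opcode.
    return op.metadata(codeBlock).m_profile;
}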
struct OpTailCallForwardArguments : public Instruction {
static constexpr OpcodeID opcodeID = op_tail_call_forward_arguments;
static constexpr size_t length = 8;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister callee, VirtualRegister thisValue, VirtualRegister arguments, VirtualRegister firstFree, int firstVarArg)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, dst, callee, thisValue, arguments, firstFree, firstVarArg);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister callee, VirtualRegister thisValue, VirtualRegister arguments, VirtualRegister firstFree, int firstVarArg)
{
auto __metadataID = gen->addMetadataFor(opcodeID);
return emit<__size, BytecodeGenerator, shouldAssert>(gen, dst, callee, thisValue, arguments, firstFree, firstVarArg, __metadataID);
}
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkWithoutMetadataID(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister callee, VirtualRegister thisValue, VirtualRegister arguments, VirtualRegister firstFree, int firstVarArg)
{
decltype(gen->addMetadataFor(opcodeID)) __metadataID { };
return checkImpl<__size, BytecodeGenerator>(gen, dst, callee, thisValue, arguments, firstFree, firstVarArg, __metadataID);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister callee, VirtualRegister thisValue, VirtualRegister arguments, VirtualRegister firstFree, int firstVarArg, unsigned __metadataID)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, dst, callee, thisValue, arguments, firstFree, firstVarArg, __metadataID);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister callee, VirtualRegister thisValue, VirtualRegister arguments, VirtualRegister firstFree, int firstVarArg)
{
auto __metadataID = gen->addMetadataFor(opcodeID);
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, dst, callee, thisValue, arguments, firstFree, firstVarArg, __metadataID))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, dst, callee, thisValue, arguments, firstFree, firstVarArg, __metadataID))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, dst, callee, thisValue, arguments, firstFree, firstVarArg, __metadataID);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& dst, VirtualRegister& callee, VirtualRegister& thisValue, VirtualRegister& arguments, VirtualRegister& firstFree, int& firstVarArg, unsigned __metadataID)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<OpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(dst)
&& Fits<VirtualRegister, __size>::check(callee)
&& Fits<VirtualRegister, __size>::check(thisValue)
&& Fits<VirtualRegister, __size>::check(arguments)
&& Fits<VirtualRegister, __size>::check(firstFree)
&& Fits<int, __size>::check(firstVarArg)
&& Fits<unsigned, __size>::check(__metadataID)
&& (__size == OpcodeSize::Wide16 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister callee, VirtualRegister thisValue, VirtualRegister arguments, VirtualRegister firstFree, int firstVarArg, unsigned __metadataID)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, dst, callee, thisValue, arguments, firstFree, firstVarArg, __metadataID)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide32));
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(dst));
gen->write(Fits<VirtualRegister, __size>::convert(callee));
gen->write(Fits<VirtualRegister, __size>::convert(thisValue));
gen->write(Fits<VirtualRegister, __size>::convert(arguments));
gen->write(Fits<VirtualRegister, __size>::convert(firstFree));
gen->write(Fits<int, __size>::convert(firstVarArg));
gen->write(Fits<unsigned, __size>::convert(__metadataID));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**tail_call_forward_arguments"[2 - __sizeShiftAmount]);
dumper->dumpOperand("dst", m_dst, true);
dumper->dumpOperand("callee", m_callee, false);
dumper->dumpOperand("thisValue", m_thisValue, false);
dumper->dumpOperand("arguments", m_arguments, false);
dumper->dumpOperand("firstFree", m_firstFree, false);
dumper->dumpOperand("firstVarArg", m_firstVarArg, false);
}
OpTailCallForwardArguments(const uint8_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_callee(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[1]))
, m_thisValue(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[2]))
, m_arguments(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[3]))
, m_firstFree(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[4]))
, m_firstVarArg(Fits<int, OpcodeSize::Narrow>::convert(stream[5]))
, m_metadataID(Fits<unsigned, OpcodeSize::Narrow>::convert(stream[6]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpTailCallForwardArguments(const uint16_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_callee(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[1]))
, m_thisValue(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[2]))
, m_arguments(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[3]))
, m_firstFree(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[4]))
, m_firstVarArg(Fits<int, OpcodeSize::Wide16>::convert(stream[5]))
, m_metadataID(Fits<unsigned, OpcodeSize::Wide16>::convert(stream[6]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpTailCallForwardArguments(const uint32_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_callee(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[1]))
, m_thisValue(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[2]))
, m_arguments(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[3]))
, m_firstFree(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[4]))
, m_firstVarArg(Fits<int, OpcodeSize::Wide32>::convert(stream[5]))
, m_metadataID(Fits<unsigned, OpcodeSize::Wide32>::convert(stream[6]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static OpTailCallForwardArguments decode(const uint8_t* stream)
{
// `stream` points at the start of the instruction; the constructor receives a
// pointer to the first operand, past the opcode and any wide-size prefix.
if (*stream == op_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == op_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setDst<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setDst<OpcodeSize::Wide16>(value, func);
else
setDst<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setCallee(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setCallee<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setCallee<OpcodeSize::Wide16>(value, func);
else
setCallee<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setCallee(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setThisValue(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setThisValue<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setThisValue<OpcodeSize::Wide16>(value, func);
else
setThisValue<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setThisValue(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 2 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setArguments(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setArguments<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setArguments<OpcodeSize::Wide16>(value, func);
else
setArguments<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setArguments(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 3 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setFirstFree(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setFirstFree<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setFirstFree<OpcodeSize::Wide16>(value, func);
else
setFirstFree<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setFirstFree(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 4 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setFirstVarArg(int value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setFirstVarArg<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setFirstVarArg<OpcodeSize::Wide16>(value, func);
else
setFirstVarArg<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setFirstVarArg(int value, Functor func)
{
if (!Fits<int, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 5 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<int, size>::convert(value);
}
struct Metadata {
WTF_MAKE_NONCOPYABLE(Metadata);
public:
static constexpr OpcodeID opcodeID = op_tail_call_forward_arguments;
Metadata(const OpTailCallForwardArguments&) { }
ArrayProfile m_arrayProfile;
ValueProfile m_profile;
};
Metadata& metadata(CodeBlock* codeBlock) const
{
return codeBlock->metadata<Metadata>(opcodeID, m_metadataID);
}
VirtualRegister m_dst;
VirtualRegister m_callee;
VirtualRegister m_thisValue;
VirtualRegister m_arguments;
VirtualRegister m_firstFree;
int m_firstVarArg;
unsigned m_metadataID;
};
struct OpConstruct : public Instruction {
static constexpr OpcodeID opcodeID = op_construct;
static constexpr size_t length = 6;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister callee, unsigned argc, unsigned argv)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, dst, callee, argc, argv);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister callee, unsigned argc, unsigned argv)
{
auto __metadataID = gen->addMetadataFor(opcodeID);
return emit<__size, BytecodeGenerator, shouldAssert>(gen, dst, callee, argc, argv, __metadataID);
}
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkWithoutMetadataID(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister callee, unsigned argc, unsigned argv)
{
decltype(gen->addMetadataFor(opcodeID)) __metadataID { };
return checkImpl<__size, BytecodeGenerator>(gen, dst, callee, argc, argv, __metadataID);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister callee, unsigned argc, unsigned argv, unsigned __metadataID)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, dst, callee, argc, argv, __metadataID);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister callee, unsigned argc, unsigned argv)
{
auto __metadataID = gen->addMetadataFor(opcodeID);
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, dst, callee, argc, argv, __metadataID))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, dst, callee, argc, argv, __metadataID))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, dst, callee, argc, argv, __metadataID);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& dst, VirtualRegister& callee, unsigned& argc, unsigned& argv, unsigned __metadataID)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<OpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(dst)
&& Fits<VirtualRegister, __size>::check(callee)
&& Fits<unsigned, __size>::check(argc)
&& Fits<unsigned, __size>::check(argv)
&& Fits<unsigned, __size>::check(__metadataID)
&& (__size == OpcodeSize::Wide16 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister callee, unsigned argc, unsigned argv, unsigned __metadataID)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, dst, callee, argc, argv, __metadataID)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide32));
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(dst));
gen->write(Fits<VirtualRegister, __size>::convert(callee));
gen->write(Fits<unsigned, __size>::convert(argc));
gen->write(Fits<unsigned, __size>::convert(argv));
gen->write(Fits<unsigned, __size>::convert(__metadataID));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**construct"[2 - __sizeShiftAmount]);
dumper->dumpOperand("dst", m_dst, true);
dumper->dumpOperand("callee", m_callee, false);
dumper->dumpOperand("argc", m_argc, false);
dumper->dumpOperand("argv", m_argv, false);
}
OpConstruct(const uint8_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_callee(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[1]))
, m_argc(Fits<unsigned, OpcodeSize::Narrow>::convert(stream[2]))
, m_argv(Fits<unsigned, OpcodeSize::Narrow>::convert(stream[3]))
, m_metadataID(Fits<unsigned, OpcodeSize::Narrow>::convert(stream[4]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpConstruct(const uint16_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_callee(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[1]))
, m_argc(Fits<unsigned, OpcodeSize::Wide16>::convert(stream[2]))
, m_argv(Fits<unsigned, OpcodeSize::Wide16>::convert(stream[3]))
, m_metadataID(Fits<unsigned, OpcodeSize::Wide16>::convert(stream[4]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpConstruct(const uint32_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_callee(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[1]))
, m_argc(Fits<unsigned, OpcodeSize::Wide32>::convert(stream[2]))
, m_argv(Fits<unsigned, OpcodeSize::Wide32>::convert(stream[3]))
, m_metadataID(Fits<unsigned, OpcodeSize::Wide32>::convert(stream[4]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static OpConstruct decode(const uint8_t* stream)
{
        // The pointer handed to the constructor points at the first operand; the opcode byte and any wide prefix have already been skipped.
if (*stream == op_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == op_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
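    // Layout, as implied by the constructors above: in the narrow encoding the
    // five operands (dst, callee, argc, argv, metadataID) follow the one-byte
    // opcode as one-byte slots; the wide16/wide32 encodings prepend a one-byte
    // op_wide16/op_wide32 prefix and widen each operand slot to 2 or 4 bytes.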
template<typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setDst<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setDst<OpcodeSize::Wide16>(value, func);
else
setDst<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setCallee(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setCallee<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setCallee<OpcodeSize::Wide16>(value, func);
else
setCallee<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setCallee(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setArgc(unsigned value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setArgc<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setArgc<OpcodeSize::Wide16>(value, func);
else
setArgc<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setArgc(unsigned value, Functor func)
{
if (!Fits<unsigned, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 2 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<unsigned, size>::convert(value);
}
template<typename Functor>
void setArgv(unsigned value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setArgv<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setArgv<OpcodeSize::Wide16>(value, func);
else
setArgv<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setArgv(unsigned value, Functor func)
{
if (!Fits<unsigned, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 3 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<unsigned, size>::convert(value);
}
struct Metadata {
WTF_MAKE_NONCOPYABLE(Metadata);
public:
static constexpr OpcodeID opcodeID = op_construct;
Metadata(const OpConstruct&) { }
LLIntCallLinkInfo m_callLinkInfo;
ValueProfile m_profile;
};
Metadata& metadata(CodeBlock* codeBlock) const
{
return codeBlock->metadata<Metadata>(opcodeID, m_metadataID);
}
VirtualRegister m_dst;
VirtualRegister m_callee;
unsigned m_argc;
unsigned m_argv;
unsigned m_metadataID;
};
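// Illustrative usage only, not part of the generated interface; "gen", "pc",
// and the register/operand names are assumed to be in scope in a bytecode
// generator. Emission starts Narrow and falls back to Wide16/Wide32 as the
// operands demand:
//
//     OpConstruct::emit(&gen, dst, callee, argc, argv);
//
// Decoding dispatches on the leading byte (the wide prefix if present,
// otherwise the opcode):
//
//     OpConstruct op = OpConstruct::decode(pc);
//     VirtualRegister callee = op.m_callee;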
struct OpNewArrayWithSize : public Instruction {
static constexpr OpcodeID opcodeID = op_new_array_with_size;
static constexpr size_t length = 4;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister length)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, dst, length);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister length)
{
auto __metadataID = gen->addMetadataFor(opcodeID);
return emit<__size, BytecodeGenerator, shouldAssert>(gen, dst, length, __metadataID);
}
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkWithoutMetadataID(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister length)
{
decltype(gen->addMetadataFor(opcodeID)) __metadataID { };
return checkImpl<__size, BytecodeGenerator>(gen, dst, length, __metadataID);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister length, unsigned __metadataID)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, dst, length, __metadataID);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister length)
{
auto __metadataID = gen->addMetadataFor(opcodeID);
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, dst, length, __metadataID))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, dst, length, __metadataID))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, dst, length, __metadataID);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& dst, VirtualRegister& length, unsigned __metadataID)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
        // FIXME: The wide16 encoding is disabled on the Windows CLoop as a workaround.
        // https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<OpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(dst)
&& Fits<VirtualRegister, __size>::check(length)
&& Fits<unsigned, __size>::check(__metadataID)
&& (__size == OpcodeSize::Wide16 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister length, unsigned __metadataID)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, dst, length, __metadataID)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide32));
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(dst));
gen->write(Fits<VirtualRegister, __size>::convert(length));
gen->write(Fits<unsigned, __size>::convert(__metadataID));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**new_array_with_size"[2 - __sizeShiftAmount]);
dumper->dumpOperand("dst", m_dst, true);
dumper->dumpOperand("length", m_length, false);
}
OpNewArrayWithSize(const uint8_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_length(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[1]))
, m_metadataID(Fits<unsigned, OpcodeSize::Narrow>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpNewArrayWithSize(const uint16_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_length(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[1]))
, m_metadataID(Fits<unsigned, OpcodeSize::Wide16>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpNewArrayWithSize(const uint32_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_length(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[1]))
, m_metadataID(Fits<unsigned, OpcodeSize::Wide32>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static OpNewArrayWithSize decode(const uint8_t* stream)
{
        // The pointer handed to the constructor points at the first operand; the opcode byte and any wide prefix have already been skipped.
if (*stream == op_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == op_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setDst<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setDst<OpcodeSize::Wide16>(value, func);
else
setDst<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setLength(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setLength<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setLength<OpcodeSize::Wide16>(value, func);
else
setLength<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setLength(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
struct Metadata {
WTF_MAKE_NONCOPYABLE(Metadata);
public:
static constexpr OpcodeID opcodeID = op_new_array_with_size;
Metadata(const OpNewArrayWithSize&) { }
ArrayAllocationProfile m_arrayAllocationProfile;
};
Metadata& metadata(CodeBlock* codeBlock) const
{
return codeBlock->metadata<Metadata>(opcodeID, m_metadataID);
}
VirtualRegister m_dst;
VirtualRegister m_length;
unsigned m_metadataID;
};
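// Sketch of per-instruction metadata access; "codeBlock" (a CodeBlock*) and
// "pc" are assumed to be in scope:
//
//     auto bytecode = OpNewArrayWithSize::decode(pc);
//     auto& profile = bytecode.metadata(codeBlock).m_arrayAllocationProfile;
//
// m_metadataID is the index the CodeBlock uses to locate this instruction's
// Metadata entry for op_new_array_with_size.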
struct OpNewObject : public Instruction {
static constexpr OpcodeID opcodeID = op_new_object;
static constexpr size_t length = 4;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister dst, unsigned inlineCapacity)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, dst, inlineCapacity);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, unsigned inlineCapacity)
{
auto __metadataID = gen->addMetadataFor(opcodeID);
return emit<__size, BytecodeGenerator, shouldAssert>(gen, dst, inlineCapacity, __metadataID);
}
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkWithoutMetadataID(BytecodeGenerator* gen, VirtualRegister dst, unsigned inlineCapacity)
{
decltype(gen->addMetadataFor(opcodeID)) __metadataID { };
return checkImpl<__size, BytecodeGenerator>(gen, dst, inlineCapacity, __metadataID);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, unsigned inlineCapacity, unsigned __metadataID)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, dst, inlineCapacity, __metadataID);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister dst, unsigned inlineCapacity)
{
auto __metadataID = gen->addMetadataFor(opcodeID);
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, dst, inlineCapacity, __metadataID))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, dst, inlineCapacity, __metadataID))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, dst, inlineCapacity, __metadataID);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& dst, unsigned& inlineCapacity, unsigned __metadataID)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<OpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(dst)
&& Fits<unsigned, __size>::check(inlineCapacity)
&& Fits<unsigned, __size>::check(__metadataID)
&& (__size == OpcodeSize::Wide16 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister dst, unsigned inlineCapacity, unsigned __metadataID)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, dst, inlineCapacity, __metadataID)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide32));
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(dst));
gen->write(Fits<unsigned, __size>::convert(inlineCapacity));
gen->write(Fits<unsigned, __size>::convert(__metadataID));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**new_object"[2 - __sizeShiftAmount]);
dumper->dumpOperand("dst", m_dst, true);
dumper->dumpOperand("inlineCapacity", m_inlineCapacity, false);
}
OpNewObject(const uint8_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_inlineCapacity(Fits<unsigned, OpcodeSize::Narrow>::convert(stream[1]))
, m_metadataID(Fits<unsigned, OpcodeSize::Narrow>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpNewObject(const uint16_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_inlineCapacity(Fits<unsigned, OpcodeSize::Wide16>::convert(stream[1]))
, m_metadataID(Fits<unsigned, OpcodeSize::Wide16>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpNewObject(const uint32_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_inlineCapacity(Fits<unsigned, OpcodeSize::Wide32>::convert(stream[1]))
, m_metadataID(Fits<unsigned, OpcodeSize::Wide32>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static OpNewObject decode(const uint8_t* stream)
{
        // The pointer handed to the constructor points at the first operand; the opcode byte and any wide prefix have already been skipped.
if (*stream == op_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == op_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setDst<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setDst<OpcodeSize::Wide16>(value, func);
else
setDst<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setInlineCapacity(unsigned value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setInlineCapacity<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setInlineCapacity<OpcodeSize::Wide16>(value, func);
else
setInlineCapacity<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setInlineCapacity(unsigned value, Functor func)
{
if (!Fits<unsigned, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<unsigned, size>::convert(value);
}
struct Metadata {
WTF_MAKE_NONCOPYABLE(Metadata);
public:
static constexpr OpcodeID opcodeID = op_new_object;
Metadata(const OpNewObject&) { }
ObjectAllocationProfile m_objectAllocationProfile;
};
Metadata& metadata(CodeBlock* codeBlock) const
{
return codeBlock->metadata<Metadata>(opcodeID, m_metadataID);
}
VirtualRegister m_dst;
unsigned m_inlineCapacity;
unsigned m_metadataID;
};
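// The set* helpers rewrite an operand in place. When the new value does not
// fit the instruction's current encoding, the functor is invoked to supply a
// replacement that does; the recovery strategy is the caller's. A hypothetical
// sketch:
//
//     op.setInlineCapacity(newCapacity, [&]() -> unsigned {
//         return 0; // assumed fallback; real callers pick something meaningful
//     });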
struct OpGetByIdWithThis : public Instruction {
static constexpr OpcodeID opcodeID = op_get_by_id_with_this;
static constexpr size_t length = 6;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister base, VirtualRegister thisValue, unsigned property)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, dst, base, thisValue, property);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister base, VirtualRegister thisValue, unsigned property)
{
auto __metadataID = gen->addMetadataFor(opcodeID);
return emit<__size, BytecodeGenerator, shouldAssert>(gen, dst, base, thisValue, property, __metadataID);
}
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkWithoutMetadataID(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister base, VirtualRegister thisValue, unsigned property)
{
decltype(gen->addMetadataFor(opcodeID)) __metadataID { };
return checkImpl<__size, BytecodeGenerator>(gen, dst, base, thisValue, property, __metadataID);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister base, VirtualRegister thisValue, unsigned property, unsigned __metadataID)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, dst, base, thisValue, property, __metadataID);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister base, VirtualRegister thisValue, unsigned property)
{
auto __metadataID = gen->addMetadataFor(opcodeID);
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, dst, base, thisValue, property, __metadataID))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, dst, base, thisValue, property, __metadataID))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, dst, base, thisValue, property, __metadataID);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& dst, VirtualRegister& base, VirtualRegister& thisValue, unsigned& property, unsigned __metadataID)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
        // FIXME: The wide16 encoding is disabled on the Windows CLoop as a workaround.
        // https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<OpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(dst)
&& Fits<VirtualRegister, __size>::check(base)
&& Fits<VirtualRegister, __size>::check(thisValue)
&& Fits<unsigned, __size>::check(property)
&& Fits<unsigned, __size>::check(__metadataID)
&& (__size == OpcodeSize::Wide16 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister base, VirtualRegister thisValue, unsigned property, unsigned __metadataID)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, dst, base, thisValue, property, __metadataID)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide32));
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(dst));
gen->write(Fits<VirtualRegister, __size>::convert(base));
gen->write(Fits<VirtualRegister, __size>::convert(thisValue));
gen->write(Fits<unsigned, __size>::convert(property));
gen->write(Fits<unsigned, __size>::convert(__metadataID));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**get_by_id_with_this"[2 - __sizeShiftAmount]);
dumper->dumpOperand("dst", m_dst, true);
dumper->dumpOperand("base", m_base, false);
dumper->dumpOperand("thisValue", m_thisValue, false);
dumper->dumpOperand("property", m_property, false);
}
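    // The "**name"[2 - shift] indexing above selects "name", "*name", or
    // "**name" for the narrow, wide16, and wide32 encodings respectively, so
    // the dump output marks widened instructions with leading asterisks.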
OpGetByIdWithThis(const uint8_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_base(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[1]))
, m_thisValue(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[2]))
, m_property(Fits<unsigned, OpcodeSize::Narrow>::convert(stream[3]))
, m_metadataID(Fits<unsigned, OpcodeSize::Narrow>::convert(stream[4]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpGetByIdWithThis(const uint16_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_base(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[1]))
, m_thisValue(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[2]))
, m_property(Fits<unsigned, OpcodeSize::Wide16>::convert(stream[3]))
, m_metadataID(Fits<unsigned, OpcodeSize::Wide16>::convert(stream[4]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpGetByIdWithThis(const uint32_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_base(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[1]))
, m_thisValue(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[2]))
, m_property(Fits<unsigned, OpcodeSize::Wide32>::convert(stream[3]))
, m_metadataID(Fits<unsigned, OpcodeSize::Wide32>::convert(stream[4]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static OpGetByIdWithThis decode(const uint8_t* stream)
{
        // The pointer handed to the constructor points at the first operand; the opcode byte and any wide prefix have already been skipped.
if (*stream == op_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == op_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setDst<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setDst<OpcodeSize::Wide16>(value, func);
else
setDst<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setBase(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setBase<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setBase<OpcodeSize::Wide16>(value, func);
else
setBase<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setBase(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setThisValue(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setThisValue<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setThisValue<OpcodeSize::Wide16>(value, func);
else
setThisValue<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setThisValue(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 2 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setProperty(unsigned value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setProperty<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setProperty<OpcodeSize::Wide16>(value, func);
else
setProperty<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setProperty(unsigned value, Functor func)
{
if (!Fits<unsigned, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 3 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<unsigned, size>::convert(value);
}
struct Metadata {
WTF_MAKE_NONCOPYABLE(Metadata);
public:
static constexpr OpcodeID opcodeID = op_get_by_id_with_this;
Metadata(const OpGetByIdWithThis&) { }
ValueProfile m_profile;
};
Metadata& metadata(CodeBlock* codeBlock) const
{
return codeBlock->metadata<Metadata>(opcodeID, m_metadataID);
}
VirtualRegister m_dst;
VirtualRegister m_base;
VirtualRegister m_thisValue;
unsigned m_property;
unsigned m_metadataID;
};
struct OpMul : public Instruction {
static constexpr OpcodeID opcodeID = op_mul;
static constexpr size_t length = 6;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs, OperandTypes operandTypes)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, dst, lhs, rhs, operandTypes);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs, OperandTypes operandTypes)
{
auto __metadataID = gen->addMetadataFor(opcodeID);
return emit<__size, BytecodeGenerator, shouldAssert>(gen, dst, lhs, rhs, operandTypes, __metadataID);
}
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkWithoutMetadataID(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs, OperandTypes operandTypes)
{
decltype(gen->addMetadataFor(opcodeID)) __metadataID { };
return checkImpl<__size, BytecodeGenerator>(gen, dst, lhs, rhs, operandTypes, __metadataID);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs, OperandTypes operandTypes, unsigned __metadataID)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, dst, lhs, rhs, operandTypes, __metadataID);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs, OperandTypes operandTypes)
{
auto __metadataID = gen->addMetadataFor(opcodeID);
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, dst, lhs, rhs, operandTypes, __metadataID))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, dst, lhs, rhs, operandTypes, __metadataID))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, dst, lhs, rhs, operandTypes, __metadataID);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& dst, VirtualRegister& lhs, VirtualRegister& rhs, OperandTypes& operandTypes, unsigned __metadataID)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
        // FIXME: The wide16 encoding is disabled on the Windows CLoop as a workaround.
        // https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<OpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(dst)
&& Fits<VirtualRegister, __size>::check(lhs)
&& Fits<VirtualRegister, __size>::check(rhs)
&& Fits<OperandTypes, __size>::check(operandTypes)
&& Fits<unsigned, __size>::check(__metadataID)
&& (__size == OpcodeSize::Wide16 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs, OperandTypes operandTypes, unsigned __metadataID)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, dst, lhs, rhs, operandTypes, __metadataID)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide32));
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(dst));
gen->write(Fits<VirtualRegister, __size>::convert(lhs));
gen->write(Fits<VirtualRegister, __size>::convert(rhs));
gen->write(Fits<OperandTypes, __size>::convert(operandTypes));
gen->write(Fits<unsigned, __size>::convert(__metadataID));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**mul"[2 - __sizeShiftAmount]);
dumper->dumpOperand("dst", m_dst, true);
dumper->dumpOperand("lhs", m_lhs, false);
dumper->dumpOperand("rhs", m_rhs, false);
dumper->dumpOperand("operandTypes", m_operandTypes, false);
}
OpMul(const uint8_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_lhs(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[1]))
, m_rhs(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[2]))
, m_operandTypes(Fits<OperandTypes, OpcodeSize::Narrow>::convert(stream[3]))
, m_metadataID(Fits<unsigned, OpcodeSize::Narrow>::convert(stream[4]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpMul(const uint16_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_lhs(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[1]))
, m_rhs(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[2]))
, m_operandTypes(Fits<OperandTypes, OpcodeSize::Wide16>::convert(stream[3]))
, m_metadataID(Fits<unsigned, OpcodeSize::Wide16>::convert(stream[4]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpMul(const uint32_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_lhs(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[1]))
, m_rhs(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[2]))
, m_operandTypes(Fits<OperandTypes, OpcodeSize::Wide32>::convert(stream[3]))
, m_metadataID(Fits<unsigned, OpcodeSize::Wide32>::convert(stream[4]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static OpMul decode(const uint8_t* stream)
{
        // The pointer handed to the constructor points at the first operand; the opcode byte and any wide prefix have already been skipped.
if (*stream == op_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == op_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setDst<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setDst<OpcodeSize::Wide16>(value, func);
else
setDst<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setLhs(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setLhs<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setLhs<OpcodeSize::Wide16>(value, func);
else
setLhs<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setLhs(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setRhs(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setRhs<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setRhs<OpcodeSize::Wide16>(value, func);
else
setRhs<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setRhs(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 2 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setOperandTypes(OperandTypes value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setOperandTypes<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setOperandTypes<OpcodeSize::Wide16>(value, func);
else
setOperandTypes<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setOperandTypes(OperandTypes value, Functor func)
{
if (!Fits<OperandTypes, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 3 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<OperandTypes, size>::convert(value);
}
struct Metadata {
WTF_MAKE_NONCOPYABLE(Metadata);
public:
static constexpr OpcodeID opcodeID = op_mul;
Metadata(const OpMul&) { }
BinaryArithProfile m_arithProfile;
};
Metadata& metadata(CodeBlock* codeBlock) const
{
return codeBlock->metadata<Metadata>(opcodeID, m_metadataID);
}
VirtualRegister m_dst;
VirtualRegister m_lhs;
VirtualRegister m_rhs;
OperandTypes m_operandTypes;
unsigned m_metadataID;
};
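// With shouldAssert = NoAssert, the explicit-size emit overloads report an
// operand that does not fit by returning false instead of asserting; this is
// how emitWithSmallestSizeRequirement probes encodings. A hypothetical direct
// use ("Generator" and "types" stand in for the caller's generator type and
// OperandTypes value):
//
//     if (!OpMul::emit<OpcodeSize::Narrow, Generator, NoAssert>(&gen, dst, lhs, rhs, types))
//         OpMul::emit<OpcodeSize::Wide32, Generator>(&gen, dst, lhs, rhs, types);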
struct OpDiv : public Instruction {
static constexpr OpcodeID opcodeID = op_div;
static constexpr size_t length = 6;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs, OperandTypes operandTypes)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, dst, lhs, rhs, operandTypes);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs, OperandTypes operandTypes)
{
auto __metadataID = gen->addMetadataFor(opcodeID);
return emit<__size, BytecodeGenerator, shouldAssert>(gen, dst, lhs, rhs, operandTypes, __metadataID);
}
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkWithoutMetadataID(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs, OperandTypes operandTypes)
{
decltype(gen->addMetadataFor(opcodeID)) __metadataID { };
return checkImpl<__size, BytecodeGenerator>(gen, dst, lhs, rhs, operandTypes, __metadataID);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs, OperandTypes operandTypes, unsigned __metadataID)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, dst, lhs, rhs, operandTypes, __metadataID);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs, OperandTypes operandTypes)
{
auto __metadataID = gen->addMetadataFor(opcodeID);
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, dst, lhs, rhs, operandTypes, __metadataID))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, dst, lhs, rhs, operandTypes, __metadataID))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, dst, lhs, rhs, operandTypes, __metadataID);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& dst, VirtualRegister& lhs, VirtualRegister& rhs, OperandTypes& operandTypes, unsigned __metadataID)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
        // FIXME: The wide16 encoding is disabled on the Windows CLoop as a workaround.
        // https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<OpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(dst)
&& Fits<VirtualRegister, __size>::check(lhs)
&& Fits<VirtualRegister, __size>::check(rhs)
&& Fits<OperandTypes, __size>::check(operandTypes)
&& Fits<unsigned, __size>::check(__metadataID)
&& (__size == OpcodeSize::Wide16 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs, OperandTypes operandTypes, unsigned __metadataID)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, dst, lhs, rhs, operandTypes, __metadataID)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide32));
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(dst));
gen->write(Fits<VirtualRegister, __size>::convert(lhs));
gen->write(Fits<VirtualRegister, __size>::convert(rhs));
gen->write(Fits<OperandTypes, __size>::convert(operandTypes));
gen->write(Fits<unsigned, __size>::convert(__metadataID));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**div"[2 - __sizeShiftAmount]);
dumper->dumpOperand("dst", m_dst, true);
dumper->dumpOperand("lhs", m_lhs, false);
dumper->dumpOperand("rhs", m_rhs, false);
dumper->dumpOperand("operandTypes", m_operandTypes, false);
}
OpDiv(const uint8_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_lhs(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[1]))
, m_rhs(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[2]))
, m_operandTypes(Fits<OperandTypes, OpcodeSize::Narrow>::convert(stream[3]))
, m_metadataID(Fits<unsigned, OpcodeSize::Narrow>::convert(stream[4]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpDiv(const uint16_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_lhs(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[1]))
, m_rhs(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[2]))
, m_operandTypes(Fits<OperandTypes, OpcodeSize::Wide16>::convert(stream[3]))
, m_metadataID(Fits<unsigned, OpcodeSize::Wide16>::convert(stream[4]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpDiv(const uint32_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_lhs(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[1]))
, m_rhs(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[2]))
, m_operandTypes(Fits<OperandTypes, OpcodeSize::Wide32>::convert(stream[3]))
, m_metadataID(Fits<unsigned, OpcodeSize::Wide32>::convert(stream[4]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static OpDiv decode(const uint8_t* stream)
{
        // The pointer handed to the constructor points at the first operand; the opcode byte and any wide prefix have already been skipped.
if (*stream == op_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == op_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setDst<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setDst<OpcodeSize::Wide16>(value, func);
else
setDst<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setLhs(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setLhs<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setLhs<OpcodeSize::Wide16>(value, func);
else
setLhs<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setLhs(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setRhs(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setRhs<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setRhs<OpcodeSize::Wide16>(value, func);
else
setRhs<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setRhs(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 2 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setOperandTypes(OperandTypes value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setOperandTypes<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setOperandTypes<OpcodeSize::Wide16>(value, func);
else
setOperandTypes<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setOperandTypes(OperandTypes value, Functor func)
{
if (!Fits<OperandTypes, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 3 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<OperandTypes, size>::convert(value);
}
struct Metadata {
WTF_MAKE_NONCOPYABLE(Metadata);
public:
static constexpr OpcodeID opcodeID = op_div;
Metadata(const OpDiv&) { }
BinaryArithProfile m_arithProfile;
};
Metadata& metadata(CodeBlock* codeBlock) const
{
return codeBlock->metadata<Metadata>(opcodeID, m_metadataID);
}
VirtualRegister m_dst;
VirtualRegister m_lhs;
VirtualRegister m_rhs;
OperandTypes m_operandTypes;
unsigned m_metadataID;
};
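// OpDiv mirrors OpMul above: the same (dst, lhs, rhs, operandTypes) operand
// shape and the same BinaryArithProfile metadata; only the opcode and the
// dumped name differ.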
struct OpPutByValDirect : public Instruction {
static constexpr OpcodeID opcodeID = op_put_by_val_direct;
static constexpr size_t length = 6;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister base, VirtualRegister property, VirtualRegister value, ECMAMode ecmaMode)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, base, property, value, ecmaMode);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert>
static bool emit(BytecodeGenerator* gen, VirtualRegister base, VirtualRegister property, VirtualRegister value, ECMAMode ecmaMode)
{
auto __metadataID = gen->addMetadataFor(opcodeID);
return emit<__size, BytecodeGenerator, shouldAssert>(gen, base, property, value, ecmaMode, __metadataID);
}
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkWithoutMetadataID(BytecodeGenerator* gen, VirtualRegister base, VirtualRegister property, VirtualRegister value, ECMAMode ecmaMode)
{
decltype(gen->addMetadataFor(opcodeID)) __metadataID { };
return checkImpl<__size, BytecodeGenerator>(gen, base, property, value, ecmaMode, __metadataID);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister base, VirtualRegister property, VirtualRegister value, ECMAMode ecmaMode, unsigned __metadataID)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, base, property, value, ecmaMode, __metadataID);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister base, VirtualRegister property, VirtualRegister value, ECMAMode ecmaMode)
{
auto __metadataID = gen->addMetadataFor(opcodeID);
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, base, property, value, ecmaMode, __metadataID))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, base, property, value, ecmaMode, __metadataID))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, base, property, value, ecmaMode, __metadataID);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& base, VirtualRegister& property, VirtualRegister& value, ECMAMode& ecmaMode, unsigned __metadataID)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
        // FIXME: The wide16 encoding is disabled on the Windows CLoop as a workaround.
        // https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<OpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(base)
&& Fits<VirtualRegister, __size>::check(property)
&& Fits<VirtualRegister, __size>::check(value)
&& Fits<ECMAMode, __size>::check(ecmaMode)
&& Fits<unsigned, __size>::check(__metadataID)
&& (__size == OpcodeSize::Wide16 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister base, VirtualRegister property, VirtualRegister value, ECMAMode ecmaMode, unsigned __metadataID)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, base, property, value, ecmaMode, __metadataID)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide32));
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(base));
gen->write(Fits<VirtualRegister, __size>::convert(property));
gen->write(Fits<VirtualRegister, __size>::convert(value));
gen->write(Fits<ECMAMode, __size>::convert(ecmaMode));
gen->write(Fits<unsigned, __size>::convert(__metadataID));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**put_by_val_direct"[2 - __sizeShiftAmount]);
dumper->dumpOperand("base", m_base, true);
dumper->dumpOperand("property", m_property, false);
dumper->dumpOperand("value", m_value, false);
dumper->dumpOperand("ecmaMode", m_ecmaMode, false);
}
OpPutByValDirect(const uint8_t* stream)
: m_base(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_property(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[1]))
, m_value(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[2]))
, m_ecmaMode(Fits<ECMAMode, OpcodeSize::Narrow>::convert(stream[3]))
, m_metadataID(Fits<unsigned, OpcodeSize::Narrow>::convert(stream[4]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpPutByValDirect(const uint16_t* stream)
: m_base(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_property(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[1]))
, m_value(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[2]))
, m_ecmaMode(Fits<ECMAMode, OpcodeSize::Wide16>::convert(stream[3]))
, m_metadataID(Fits<unsigned, OpcodeSize::Wide16>::convert(stream[4]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpPutByValDirect(const uint32_t* stream)
: m_base(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_property(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[1]))
, m_value(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[2]))
, m_ecmaMode(Fits<ECMAMode, OpcodeSize::Wide32>::convert(stream[3]))
, m_metadataID(Fits<unsigned, OpcodeSize::Wide32>::convert(stream[4]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static OpPutByValDirect decode(const uint8_t* stream)
{
        // The pointer handed to the constructor points at the first operand; the opcode byte and any wide prefix have already been skipped.
if (*stream == op_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == op_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setBase(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setBase<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setBase<OpcodeSize::Wide16>(value, func);
else
setBase<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setBase(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setProperty(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setProperty<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setProperty<OpcodeSize::Wide16>(value, func);
else
setProperty<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setProperty(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setValue(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setValue<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setValue<OpcodeSize::Wide16>(value, func);
else
setValue<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setValue(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 2 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setEcmaMode(ECMAMode value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setEcmaMode<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setEcmaMode<OpcodeSize::Wide16>(value, func);
else
setEcmaMode<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setEcmaMode(ECMAMode value, Functor func)
{
if (!Fits<ECMAMode, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 3 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<ECMAMode, size>::convert(value);
}
struct Metadata {
WTF_MAKE_NONCOPYABLE(Metadata);
public:
static constexpr OpcodeID opcodeID = op_put_by_val_direct;
Metadata(const OpPutByValDirect&) { }
ArrayProfile m_arrayProfile;
};
Metadata& metadata(CodeBlock* codeBlock) const
{
return codeBlock->metadata<Metadata>(opcodeID, m_metadataID);
}
VirtualRegister m_base;
VirtualRegister m_property;
VirtualRegister m_value;
ECMAMode m_ecmaMode;
unsigned m_metadataID;
};
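// Note on the setters above (an inference from the generated arithmetic, not
// generator output): each setter recomputes its operand's byte offset as
// `operandIndex * size + PaddingBySize<size>::value + 1 /* opcode byte */`,
// where PaddingBySize accounts for the wide-prefix byte. When the new value
// does not fit the instruction's current encoding, the setter calls the
// supplied Functor to obtain a replacement value that does fit; it never
// re-encodes the instruction in place.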
struct OpGetPrototypeOf : public Instruction {
static constexpr OpcodeID opcodeID = op_get_prototype_of;
static constexpr size_t length = 4;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister value)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, dst, value);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister value)
{
auto __metadataID = gen->addMetadataFor(opcodeID);
return emit<__size, BytecodeGenerator, shouldAssert>(gen, dst, value, __metadataID);
}
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkWithoutMetadataID(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister value)
{
decltype(gen->addMetadataFor(opcodeID)) __metadataID { };
return checkImpl<__size, BytecodeGenerator>(gen, dst, value, __metadataID);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister value, unsigned __metadataID)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, dst, value, __metadataID);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister value)
{
auto __metadataID = gen->addMetadataFor(opcodeID);
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, dst, value, __metadataID))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, dst, value, __metadataID))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, dst, value, __metadataID);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& dst, VirtualRegister& value, unsigned __metadataID)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<OpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(dst)
&& Fits<VirtualRegister, __size>::check(value)
&& Fits<unsigned, __size>::check(__metadataID)
&& (__size == OpcodeSize::Wide16 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister value, unsigned __metadataID)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, dst, value, __metadataID)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide32));
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(dst));
gen->write(Fits<VirtualRegister, __size>::convert(value));
gen->write(Fits<unsigned, __size>::convert(__metadataID));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**get_prototype_of"[2 - __sizeShiftAmount]);
dumper->dumpOperand("dst", m_dst, true);
dumper->dumpOperand("value", m_value, false);
}
OpGetPrototypeOf(const uint8_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_value(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[1]))
, m_metadataID(Fits<unsigned, OpcodeSize::Narrow>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpGetPrototypeOf(const uint16_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_value(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[1]))
, m_metadataID(Fits<unsigned, OpcodeSize::Wide16>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpGetPrototypeOf(const uint32_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_value(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[1]))
, m_metadataID(Fits<unsigned, OpcodeSize::Wide32>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static OpGetPrototypeOf decode(const uint8_t* stream)
{
        // stream points at the wide prefix (if any) or at the opcode; the pointer
        // handed to the constructor is advanced past both, onto the first operand.
if (*stream == op_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == op_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setDst<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setDst<OpcodeSize::Wide16>(value, func);
else
setDst<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setValue(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setValue<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setValue<OpcodeSize::Wide16>(value, func);
else
setValue<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setValue(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
struct Metadata {
WTF_MAKE_NONCOPYABLE(Metadata);
public:
static constexpr OpcodeID opcodeID = op_get_prototype_of;
Metadata(const OpGetPrototypeOf&) { }
ValueProfile m_profile;
};
Metadata& metadata(CodeBlock* codeBlock) const
{
return codeBlock->metadata<Metadata>(opcodeID, m_metadataID);
}
VirtualRegister m_dst;
VirtualRegister m_value;
unsigned m_metadataID;
};
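// Minimal usage sketch for the metadata accessor above (illustrative;
// `codeBlock` and `bytecode` are placeholder names for a CodeBlock* and a
// decoded OpGetPrototypeOf):
//
//     OpGetPrototypeOf::Metadata& md = bytecode.metadata(codeBlock);
//     // md.m_profile is the ValueProfile that records result values at this
//     // site, which the optimizing tiers consult to predict the result type.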
struct OpCreateThis : public Instruction {
static constexpr OpcodeID opcodeID = op_create_this;
static constexpr size_t length = 5;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister callee, unsigned inlineCapacity)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, dst, callee, inlineCapacity);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister callee, unsigned inlineCapacity)
{
auto __metadataID = gen->addMetadataFor(opcodeID);
return emit<__size, BytecodeGenerator, shouldAssert>(gen, dst, callee, inlineCapacity, __metadataID);
}
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkWithoutMetadataID(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister callee, unsigned inlineCapacity)
{
decltype(gen->addMetadataFor(opcodeID)) __metadataID { };
return checkImpl<__size, BytecodeGenerator>(gen, dst, callee, inlineCapacity, __metadataID);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister callee, unsigned inlineCapacity, unsigned __metadataID)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, dst, callee, inlineCapacity, __metadataID);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister callee, unsigned inlineCapacity)
{
auto __metadataID = gen->addMetadataFor(opcodeID);
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, dst, callee, inlineCapacity, __metadataID))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, dst, callee, inlineCapacity, __metadataID))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, dst, callee, inlineCapacity, __metadataID);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& dst, VirtualRegister& callee, unsigned& inlineCapacity, unsigned __metadataID)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<OpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(dst)
&& Fits<VirtualRegister, __size>::check(callee)
&& Fits<unsigned, __size>::check(inlineCapacity)
&& Fits<unsigned, __size>::check(__metadataID)
&& (__size == OpcodeSize::Wide16 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister callee, unsigned inlineCapacity, unsigned __metadataID)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, dst, callee, inlineCapacity, __metadataID)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide32));
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(dst));
gen->write(Fits<VirtualRegister, __size>::convert(callee));
gen->write(Fits<unsigned, __size>::convert(inlineCapacity));
gen->write(Fits<unsigned, __size>::convert(__metadataID));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**create_this"[2 - __sizeShiftAmount]);
dumper->dumpOperand("dst", m_dst, true);
dumper->dumpOperand("callee", m_callee, false);
dumper->dumpOperand("inlineCapacity", m_inlineCapacity, false);
}
OpCreateThis(const uint8_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_callee(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[1]))
, m_inlineCapacity(Fits<unsigned, OpcodeSize::Narrow>::convert(stream[2]))
, m_metadataID(Fits<unsigned, OpcodeSize::Narrow>::convert(stream[3]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpCreateThis(const uint16_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_callee(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[1]))
, m_inlineCapacity(Fits<unsigned, OpcodeSize::Wide16>::convert(stream[2]))
, m_metadataID(Fits<unsigned, OpcodeSize::Wide16>::convert(stream[3]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpCreateThis(const uint32_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_callee(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[1]))
, m_inlineCapacity(Fits<unsigned, OpcodeSize::Wide32>::convert(stream[2]))
, m_metadataID(Fits<unsigned, OpcodeSize::Wide32>::convert(stream[3]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static OpCreateThis decode(const uint8_t* stream)
{
        // stream points at the wide prefix (if any) or at the opcode; the pointer
        // handed to the constructor is advanced past both, onto the first operand.
if (*stream == op_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == op_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setDst<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setDst<OpcodeSize::Wide16>(value, func);
else
setDst<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setCallee(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setCallee<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setCallee<OpcodeSize::Wide16>(value, func);
else
setCallee<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setCallee(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setInlineCapacity(unsigned value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setInlineCapacity<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setInlineCapacity<OpcodeSize::Wide16>(value, func);
else
setInlineCapacity<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setInlineCapacity(unsigned value, Functor func)
{
if (!Fits<unsigned, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 2 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<unsigned, size>::convert(value);
}
struct Metadata {
WTF_MAKE_NONCOPYABLE(Metadata);
public:
static constexpr OpcodeID opcodeID = op_create_this;
Metadata(const OpCreateThis&) { }
WriteBarrier<JSCell> m_cachedCallee;
};
Metadata& metadata(CodeBlock* codeBlock) const
{
return codeBlock->metadata<Metadata>(opcodeID, m_metadataID);
}
VirtualRegister m_dst;
VirtualRegister m_callee;
unsigned m_inlineCapacity;
unsigned m_metadataID;
};
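// Reading of op_create_this from the shapes above (a summary, not generator
// output): it allocates the `this` object for constructing `callee`, reserving
// `inlineCapacity` inline property slots, and the metadata's m_cachedCallee
// write-barriered cell memoizes the callee observed at this site so repeated
// constructions by the same function can reuse a cached allocation path.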
struct OpCreateGenerator : public Instruction {
static constexpr OpcodeID opcodeID = op_create_generator;
static constexpr size_t length = 4;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister callee)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, dst, callee);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister callee)
{
auto __metadataID = gen->addMetadataFor(opcodeID);
return emit<__size, BytecodeGenerator, shouldAssert>(gen, dst, callee, __metadataID);
}
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkWithoutMetadataID(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister callee)
{
decltype(gen->addMetadataFor(opcodeID)) __metadataID { };
return checkImpl<__size, BytecodeGenerator>(gen, dst, callee, __metadataID);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister callee, unsigned __metadataID)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, dst, callee, __metadataID);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister callee)
{
auto __metadataID = gen->addMetadataFor(opcodeID);
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, dst, callee, __metadataID))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, dst, callee, __metadataID))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, dst, callee, __metadataID);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& dst, VirtualRegister& callee, unsigned __metadataID)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<OpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(dst)
&& Fits<VirtualRegister, __size>::check(callee)
&& Fits<unsigned, __size>::check(__metadataID)
&& (__size == OpcodeSize::Wide16 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister callee, unsigned __metadataID)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, dst, callee, __metadataID)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide32));
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(dst));
gen->write(Fits<VirtualRegister, __size>::convert(callee));
gen->write(Fits<unsigned, __size>::convert(__metadataID));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**create_generator"[2 - __sizeShiftAmount]);
dumper->dumpOperand("dst", m_dst, true);
dumper->dumpOperand("callee", m_callee, false);
}
OpCreateGenerator(const uint8_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_callee(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[1]))
, m_metadataID(Fits<unsigned, OpcodeSize::Narrow>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpCreateGenerator(const uint16_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_callee(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[1]))
, m_metadataID(Fits<unsigned, OpcodeSize::Wide16>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpCreateGenerator(const uint32_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_callee(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[1]))
, m_metadataID(Fits<unsigned, OpcodeSize::Wide32>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static OpCreateGenerator decode(const uint8_t* stream)
{
        // stream points at the wide prefix (if any) or at the opcode; the pointer
        // handed to the constructor is advanced past both, onto the first operand.
if (*stream == op_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == op_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setDst<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setDst<OpcodeSize::Wide16>(value, func);
else
setDst<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setCallee(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setCallee<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setCallee<OpcodeSize::Wide16>(value, func);
else
setCallee<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setCallee(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
struct Metadata {
WTF_MAKE_NONCOPYABLE(Metadata);
public:
static constexpr OpcodeID opcodeID = op_create_generator;
Metadata(const OpCreateGenerator&) { }
WriteBarrier<JSCell> m_cachedCallee;
};
Metadata& metadata(CodeBlock* codeBlock) const
{
return codeBlock->metadata<Metadata>(opcodeID, m_metadataID);
}
VirtualRegister m_dst;
VirtualRegister m_callee;
unsigned m_metadataID;
};
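// Usage sketch for the emit() entry points above (illustrative; `generator`,
// `dstRegister`, and `calleeRegister` are placeholder names):
//
//     OpCreateGenerator::emit(&generator, dstRegister, calleeRegister);
//
// The public emit() always starts from the narrowest encoding:
// emitWithSmallestSizeRequirement() tries Narrow, then Wide16, and finally
// Wide32, asserting that the last attempt succeeds.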
struct OpJneqPtr : public Instruction {
static constexpr OpcodeID opcodeID = op_jneq_ptr;
static constexpr size_t length = 5;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister value, VirtualRegister specialPointer, BoundLabel targetLabel)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, value, specialPointer, targetLabel);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert>
static bool emit(BytecodeGenerator* gen, VirtualRegister value, VirtualRegister specialPointer, BoundLabel targetLabel)
{
auto __metadataID = gen->addMetadataFor(opcodeID);
return emit<__size, BytecodeGenerator, shouldAssert>(gen, value, specialPointer, targetLabel, __metadataID);
}
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkWithoutMetadataID(BytecodeGenerator* gen, VirtualRegister value, VirtualRegister specialPointer, BoundLabel targetLabel)
{
decltype(gen->addMetadataFor(opcodeID)) __metadataID { };
return checkImpl<__size, BytecodeGenerator>(gen, value, specialPointer, targetLabel, __metadataID);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister value, VirtualRegister specialPointer, BoundLabel targetLabel, unsigned __metadataID)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, value, specialPointer, targetLabel, __metadataID);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister value, VirtualRegister specialPointer, BoundLabel targetLabel)
{
auto __metadataID = gen->addMetadataFor(opcodeID);
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, value, specialPointer, targetLabel, __metadataID))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, value, specialPointer, targetLabel, __metadataID))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, value, specialPointer, targetLabel, __metadataID);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& value, VirtualRegister& specialPointer, BoundLabel& targetLabel, unsigned __metadataID)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<OpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(value)
&& Fits<VirtualRegister, __size>::check(specialPointer)
&& Fits<BoundLabel, __size>::check(targetLabel)
&& Fits<unsigned, __size>::check(__metadataID)
&& (__size == OpcodeSize::Wide16 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister value, VirtualRegister specialPointer, BoundLabel targetLabel, unsigned __metadataID)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, value, specialPointer, targetLabel, __metadataID)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide32));
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(value));
gen->write(Fits<VirtualRegister, __size>::convert(specialPointer));
gen->write(Fits<BoundLabel, __size>::convert(targetLabel));
gen->write(Fits<unsigned, __size>::convert(__metadataID));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**jneq_ptr"[2 - __sizeShiftAmount]);
dumper->dumpOperand("value", m_value, true);
dumper->dumpOperand("specialPointer", m_specialPointer, false);
dumper->dumpOperand("targetLabel", m_targetLabel, false);
}
OpJneqPtr(const uint8_t* stream)
: m_value(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_specialPointer(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[1]))
, m_targetLabel(Fits<BoundLabel, OpcodeSize::Narrow>::convert(stream[2]))
, m_metadataID(Fits<unsigned, OpcodeSize::Narrow>::convert(stream[3]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpJneqPtr(const uint16_t* stream)
: m_value(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_specialPointer(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[1]))
, m_targetLabel(Fits<BoundLabel, OpcodeSize::Wide16>::convert(stream[2]))
, m_metadataID(Fits<unsigned, OpcodeSize::Wide16>::convert(stream[3]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpJneqPtr(const uint32_t* stream)
: m_value(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_specialPointer(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[1]))
, m_targetLabel(Fits<BoundLabel, OpcodeSize::Wide32>::convert(stream[2]))
, m_metadataID(Fits<unsigned, OpcodeSize::Wide32>::convert(stream[3]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static OpJneqPtr decode(const uint8_t* stream)
{
        // stream points at the wide prefix (if any) or at the opcode; the pointer
        // handed to the constructor is advanced past both, onto the first operand.
if (*stream == op_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == op_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setValue(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setValue<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setValue<OpcodeSize::Wide16>(value, func);
else
setValue<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setValue(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setSpecialPointer(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setSpecialPointer<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setSpecialPointer<OpcodeSize::Wide16>(value, func);
else
setSpecialPointer<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setSpecialPointer(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setTargetLabel(BoundLabel value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setTargetLabel<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setTargetLabel<OpcodeSize::Wide16>(value, func);
else
setTargetLabel<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setTargetLabel(BoundLabel value, Functor func)
{
if (!Fits<BoundLabel, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 2 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<BoundLabel, size>::convert(value);
}
struct Metadata {
WTF_MAKE_NONCOPYABLE(Metadata);
public:
static constexpr OpcodeID opcodeID = op_jneq_ptr;
Metadata(const OpJneqPtr&) { }
bool m_hasJumped;
};
Metadata& metadata(CodeBlock* codeBlock) const
{
return codeBlock->metadata<Metadata>(opcodeID, m_metadataID);
}
VirtualRegister m_value;
VirtualRegister m_specialPointer;
BoundLabel m_targetLabel;
unsigned m_metadataID;
};
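// Reading of op_jneq_ptr (a summary, not generator output): it jumps to
// `targetLabel` when `value` is not the expected special pointer. The
// m_hasJumped metadata bit records whether the branch was ever taken; while it
// remains false, an optimizing tier can assume the comparison always succeeds
// and fold the branch, falling back to this bytecode if the assumption breaks.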
struct OpProfileType : public Instruction {
static constexpr OpcodeID opcodeID = op_profile_type;
static constexpr size_t length = 7;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister targetVirtualRegister, SymbolTableOrScopeDepth symbolTableOrScopeDepth, ProfileTypeBytecodeFlag flag, unsigned identifier, ResolveType resolveType)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, targetVirtualRegister, symbolTableOrScopeDepth, flag, identifier, resolveType);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert>
static bool emit(BytecodeGenerator* gen, VirtualRegister targetVirtualRegister, SymbolTableOrScopeDepth symbolTableOrScopeDepth, ProfileTypeBytecodeFlag flag, unsigned identifier, ResolveType resolveType)
{
auto __metadataID = gen->addMetadataFor(opcodeID);
return emit<__size, BytecodeGenerator, shouldAssert>(gen, targetVirtualRegister, symbolTableOrScopeDepth, flag, identifier, resolveType, __metadataID);
}
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkWithoutMetadataID(BytecodeGenerator* gen, VirtualRegister targetVirtualRegister, SymbolTableOrScopeDepth symbolTableOrScopeDepth, ProfileTypeBytecodeFlag flag, unsigned identifier, ResolveType resolveType)
{
decltype(gen->addMetadataFor(opcodeID)) __metadataID { };
return checkImpl<__size, BytecodeGenerator>(gen, targetVirtualRegister, symbolTableOrScopeDepth, flag, identifier, resolveType, __metadataID);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister targetVirtualRegister, SymbolTableOrScopeDepth symbolTableOrScopeDepth, ProfileTypeBytecodeFlag flag, unsigned identifier, ResolveType resolveType, unsigned __metadataID)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, targetVirtualRegister, symbolTableOrScopeDepth, flag, identifier, resolveType, __metadataID);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister targetVirtualRegister, SymbolTableOrScopeDepth symbolTableOrScopeDepth, ProfileTypeBytecodeFlag flag, unsigned identifier, ResolveType resolveType)
{
auto __metadataID = gen->addMetadataFor(opcodeID);
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, targetVirtualRegister, symbolTableOrScopeDepth, flag, identifier, resolveType, __metadataID))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, targetVirtualRegister, symbolTableOrScopeDepth, flag, identifier, resolveType, __metadataID))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, targetVirtualRegister, symbolTableOrScopeDepth, flag, identifier, resolveType, __metadataID);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& targetVirtualRegister, SymbolTableOrScopeDepth& symbolTableOrScopeDepth, ProfileTypeBytecodeFlag& flag, unsigned& identifier, ResolveType& resolveType, unsigned __metadataID)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<OpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(targetVirtualRegister)
&& Fits<SymbolTableOrScopeDepth, __size>::check(symbolTableOrScopeDepth)
&& Fits<ProfileTypeBytecodeFlag, __size>::check(flag)
&& Fits<unsigned, __size>::check(identifier)
&& Fits<ResolveType, __size>::check(resolveType)
&& Fits<unsigned, __size>::check(__metadataID)
&& (__size == OpcodeSize::Wide16 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister targetVirtualRegister, SymbolTableOrScopeDepth symbolTableOrScopeDepth, ProfileTypeBytecodeFlag flag, unsigned identifier, ResolveType resolveType, unsigned __metadataID)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, targetVirtualRegister, symbolTableOrScopeDepth, flag, identifier, resolveType, __metadataID)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide32));
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(targetVirtualRegister));
gen->write(Fits<SymbolTableOrScopeDepth, __size>::convert(symbolTableOrScopeDepth));
gen->write(Fits<ProfileTypeBytecodeFlag, __size>::convert(flag));
gen->write(Fits<unsigned, __size>::convert(identifier));
gen->write(Fits<ResolveType, __size>::convert(resolveType));
gen->write(Fits<unsigned, __size>::convert(__metadataID));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**profile_type"[2 - __sizeShiftAmount]);
dumper->dumpOperand("targetVirtualRegister", m_targetVirtualRegister, true);
dumper->dumpOperand("symbolTableOrScopeDepth", m_symbolTableOrScopeDepth, false);
dumper->dumpOperand("flag", m_flag, false);
dumper->dumpOperand("identifier", m_identifier, false);
dumper->dumpOperand("resolveType", m_resolveType, false);
}
OpProfileType(const uint8_t* stream)
: m_targetVirtualRegister(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_symbolTableOrScopeDepth(Fits<SymbolTableOrScopeDepth, OpcodeSize::Narrow>::convert(stream[1]))
, m_flag(Fits<ProfileTypeBytecodeFlag, OpcodeSize::Narrow>::convert(stream[2]))
, m_identifier(Fits<unsigned, OpcodeSize::Narrow>::convert(stream[3]))
, m_resolveType(Fits<ResolveType, OpcodeSize::Narrow>::convert(stream[4]))
, m_metadataID(Fits<unsigned, OpcodeSize::Narrow>::convert(stream[5]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpProfileType(const uint16_t* stream)
: m_targetVirtualRegister(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_symbolTableOrScopeDepth(Fits<SymbolTableOrScopeDepth, OpcodeSize::Wide16>::convert(stream[1]))
, m_flag(Fits<ProfileTypeBytecodeFlag, OpcodeSize::Wide16>::convert(stream[2]))
, m_identifier(Fits<unsigned, OpcodeSize::Wide16>::convert(stream[3]))
, m_resolveType(Fits<ResolveType, OpcodeSize::Wide16>::convert(stream[4]))
, m_metadataID(Fits<unsigned, OpcodeSize::Wide16>::convert(stream[5]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpProfileType(const uint32_t* stream)
: m_targetVirtualRegister(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_symbolTableOrScopeDepth(Fits<SymbolTableOrScopeDepth, OpcodeSize::Wide32>::convert(stream[1]))
, m_flag(Fits<ProfileTypeBytecodeFlag, OpcodeSize::Wide32>::convert(stream[2]))
, m_identifier(Fits<unsigned, OpcodeSize::Wide32>::convert(stream[3]))
, m_resolveType(Fits<ResolveType, OpcodeSize::Wide32>::convert(stream[4]))
, m_metadataID(Fits<unsigned, OpcodeSize::Wide32>::convert(stream[5]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static OpProfileType decode(const uint8_t* stream)
{
        // stream points at the wide prefix (if any) or at the opcode; the pointer
        // handed to the constructor is advanced past both, onto the first operand.
if (*stream == op_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == op_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setTargetVirtualRegister(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setTargetVirtualRegister<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setTargetVirtualRegister<OpcodeSize::Wide16>(value, func);
else
setTargetVirtualRegister<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setTargetVirtualRegister(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setSymbolTableOrScopeDepth(SymbolTableOrScopeDepth value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setSymbolTableOrScopeDepth<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setSymbolTableOrScopeDepth<OpcodeSize::Wide16>(value, func);
else
setSymbolTableOrScopeDepth<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setSymbolTableOrScopeDepth(SymbolTableOrScopeDepth value, Functor func)
{
if (!Fits<SymbolTableOrScopeDepth, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<SymbolTableOrScopeDepth, size>::convert(value);
}
template<typename Functor>
void setFlag(ProfileTypeBytecodeFlag value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setFlag<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setFlag<OpcodeSize::Wide16>(value, func);
else
setFlag<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setFlag(ProfileTypeBytecodeFlag value, Functor func)
{
if (!Fits<ProfileTypeBytecodeFlag, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 2 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<ProfileTypeBytecodeFlag, size>::convert(value);
}
template<typename Functor>
void setIdentifier(unsigned value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setIdentifier<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setIdentifier<OpcodeSize::Wide16>(value, func);
else
setIdentifier<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setIdentifier(unsigned value, Functor func)
{
if (!Fits<unsigned, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 3 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<unsigned, size>::convert(value);
}
template<typename Functor>
void setResolveType(ResolveType value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setResolveType<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setResolveType<OpcodeSize::Wide16>(value, func);
else
setResolveType<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setResolveType(ResolveType value, Functor func)
{
if (!Fits<ResolveType, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 4 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<ResolveType, size>::convert(value);
}
struct Metadata {
WTF_MAKE_NONCOPYABLE(Metadata);
public:
static constexpr OpcodeID opcodeID = op_profile_type;
Metadata(const OpProfileType&) { }
TypeLocation* m_typeLocation;
};
Metadata& metadata(CodeBlock* codeBlock) const
{
return codeBlock->metadata<Metadata>(opcodeID, m_metadataID);
}
VirtualRegister m_targetVirtualRegister;
SymbolTableOrScopeDepth m_symbolTableOrScopeDepth;
ProfileTypeBytecodeFlag m_flag;
unsigned m_identifier;
ResolveType m_resolveType;
unsigned m_metadataID;
};
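// Reading of op_profile_type (a summary, not generator output): emitted only
// when type profiling is enabled, it logs the value currently in
// `targetVirtualRegister` into the metadata's TypeLocation, tagged by the
// ProfileTypeBytecodeFlag (what kind of site this is) and the ResolveType
// (how the underlying variable resolves).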
struct OpProfileControlFlow : public Instruction {
static constexpr OpcodeID opcodeID = op_profile_control_flow;
static constexpr size_t length = 3;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, int textOffset)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, textOffset);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert>
static bool emit(BytecodeGenerator* gen, int textOffset)
{
auto __metadataID = gen->addMetadataFor(opcodeID);
return emit<__size, BytecodeGenerator, shouldAssert>(gen, textOffset, __metadataID);
}
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkWithoutMetadataID(BytecodeGenerator* gen, int textOffset)
{
decltype(gen->addMetadataFor(opcodeID)) __metadataID { };
return checkImpl<__size, BytecodeGenerator>(gen, textOffset, __metadataID);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, int textOffset, unsigned __metadataID)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, textOffset, __metadataID);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, int textOffset)
{
auto __metadataID = gen->addMetadataFor(opcodeID);
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, textOffset, __metadataID))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, textOffset, __metadataID))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, textOffset, __metadataID);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, int& textOffset, unsigned __metadataID)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<OpcodeID, __size>::check(opcodeID)
&& Fits<int, __size>::check(textOffset)
&& Fits<unsigned, __size>::check(__metadataID)
&& (__size == OpcodeSize::Wide16 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, int textOffset, unsigned __metadataID)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, textOffset, __metadataID)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide32));
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<int, __size>::convert(textOffset));
gen->write(Fits<unsigned, __size>::convert(__metadataID));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**profile_control_flow"[2 - __sizeShiftAmount]);
dumper->dumpOperand("textOffset", m_textOffset, true);
}
OpProfileControlFlow(const uint8_t* stream)
: m_textOffset(Fits<int, OpcodeSize::Narrow>::convert(stream[0]))
, m_metadataID(Fits<unsigned, OpcodeSize::Narrow>::convert(stream[1]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpProfileControlFlow(const uint16_t* stream)
: m_textOffset(Fits<int, OpcodeSize::Wide16>::convert(stream[0]))
, m_metadataID(Fits<unsigned, OpcodeSize::Wide16>::convert(stream[1]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpProfileControlFlow(const uint32_t* stream)
: m_textOffset(Fits<int, OpcodeSize::Wide32>::convert(stream[0]))
, m_metadataID(Fits<unsigned, OpcodeSize::Wide32>::convert(stream[1]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static OpProfileControlFlow decode(const uint8_t* stream)
{
        // stream points at the wide prefix (if any) or at the opcode; the pointer
        // handed to the constructor is advanced past both, onto the first operand.
if (*stream == op_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == op_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setTextOffset(int value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setTextOffset<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setTextOffset<OpcodeSize::Wide16>(value, func);
else
setTextOffset<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setTextOffset(int value, Functor func)
{
if (!Fits<int, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<int, size>::convert(value);
}
struct Metadata {
WTF_MAKE_NONCOPYABLE(Metadata);
public:
static constexpr OpcodeID opcodeID = op_profile_control_flow;
Metadata(const OpProfileControlFlow&) { }
BasicBlockLocation* m_basicBlockLocation;
};
Metadata& metadata(CodeBlock* codeBlock) const
{
return codeBlock->metadata<Metadata>(opcodeID, m_metadataID);
}
int m_textOffset;
unsigned m_metadataID;
};
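// Reading of op_profile_control_flow (a summary, not generator output): it
// marks the entry of a basic block for the control-flow profiler; executing it
// flags the metadata's BasicBlockLocation as having run, and `textOffset`
// anchors the block to a source position for coverage-style reporting.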
struct OpBitor : public Instruction {
static constexpr OpcodeID opcodeID = op_bitor;
static constexpr size_t length = 5;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, dst, lhs, rhs);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
auto __metadataID = gen->addMetadataFor(opcodeID);
return emit<__size, BytecodeGenerator, shouldAssert>(gen, dst, lhs, rhs, __metadataID);
}
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkWithoutMetadataID(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
decltype(gen->addMetadataFor(opcodeID)) __metadataID { };
return checkImpl<__size, BytecodeGenerator>(gen, dst, lhs, rhs, __metadataID);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs, unsigned __metadataID)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, dst, lhs, rhs, __metadataID);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
auto __metadataID = gen->addMetadataFor(opcodeID);
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, dst, lhs, rhs, __metadataID))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, dst, lhs, rhs, __metadataID))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, dst, lhs, rhs, __metadataID);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& dst, VirtualRegister& lhs, VirtualRegister& rhs, unsigned __metadataID)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<OpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(dst)
&& Fits<VirtualRegister, __size>::check(lhs)
&& Fits<VirtualRegister, __size>::check(rhs)
&& Fits<unsigned, __size>::check(__metadataID)
&& (__size == OpcodeSize::Wide16 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs, unsigned __metadataID)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, dst, lhs, rhs, __metadataID)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide32));
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(dst));
gen->write(Fits<VirtualRegister, __size>::convert(lhs));
gen->write(Fits<VirtualRegister, __size>::convert(rhs));
gen->write(Fits<unsigned, __size>::convert(__metadataID));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**bitor"[2 - __sizeShiftAmount]);
dumper->dumpOperand("dst", m_dst, true);
dumper->dumpOperand("lhs", m_lhs, false);
dumper->dumpOperand("rhs", m_rhs, false);
}
OpBitor(const uint8_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_lhs(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[1]))
, m_rhs(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[2]))
, m_metadataID(Fits<unsigned, OpcodeSize::Narrow>::convert(stream[3]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpBitor(const uint16_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_lhs(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[1]))
, m_rhs(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[2]))
, m_metadataID(Fits<unsigned, OpcodeSize::Wide16>::convert(stream[3]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpBitor(const uint32_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_lhs(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[1]))
, m_rhs(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[2]))
, m_metadataID(Fits<unsigned, OpcodeSize::Wide32>::convert(stream[3]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static OpBitor decode(const uint8_t* stream)
{
        // stream points at the wide prefix (if any) or at the opcode; the pointer
        // handed to the constructor is advanced past both, onto the first operand.
if (*stream == op_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == op_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setDst<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setDst<OpcodeSize::Wide16>(value, func);
else
setDst<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setLhs(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setLhs<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setLhs<OpcodeSize::Wide16>(value, func);
else
setLhs<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setLhs(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setRhs(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setRhs<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setRhs<OpcodeSize::Wide16>(value, func);
else
setRhs<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setRhs(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 2 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
struct Metadata {
WTF_MAKE_NONCOPYABLE(Metadata);
public:
static constexpr OpcodeID opcodeID = op_bitor;
Metadata(const OpBitor&) { }
ValueProfile m_profile;
};
Metadata& metadata(CodeBlock* codeBlock) const
{
return codeBlock->metadata<Metadata>(opcodeID, m_metadataID);
}
VirtualRegister m_dst;
VirtualRegister m_lhs;
VirtualRegister m_rhs;
unsigned m_metadataID;
};
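// Illustrative note, not generated output: profiling state lives out of line in
// per-opcode Metadata, reached through the embedded m_metadataID so the
// instruction stream itself stays compact. A hedged sketch, assuming a JSC
// Instruction* and CodeBlock* are in hand and Instruction::as<T>() is the usual
// typed accessor:
//
//     auto op = instruction->as<OpBitor>();
//     ValueProfile& profile = op.metadata(codeBlock).m_profile;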
struct OpHasEnumerableIndexedProperty : public Instruction {
static constexpr OpcodeID opcodeID = op_has_enumerable_indexed_property;
static constexpr size_t length = 5;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister base, VirtualRegister property)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, dst, base, property);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister base, VirtualRegister property)
{
auto __metadataID = gen->addMetadataFor(opcodeID);
return emit<__size, BytecodeGenerator, shouldAssert>(gen, dst, base, property, __metadataID);
}
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkWithoutMetadataID(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister base, VirtualRegister property)
{
decltype(gen->addMetadataFor(opcodeID)) __metadataID { };
return checkImpl<__size, BytecodeGenerator>(gen, dst, base, property, __metadataID);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister base, VirtualRegister property, unsigned __metadataID)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, dst, base, property, __metadataID);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister base, VirtualRegister property)
{
auto __metadataID = gen->addMetadataFor(opcodeID);
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, dst, base, property, __metadataID))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, dst, base, property, __metadataID))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, dst, base, property, __metadataID);
}
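    // Illustrative note, not generated output: emission escalates
    // narrow -> wide16 -> wide32, asserting only on the final wide32 attempt,
    // since every representable operand fits in 32 bits. The plain emit()
    // overload above starts from Narrow, so a typical call site (gen standing
    // in for a BytecodeGenerator*) is just:
    //
    //     OpHasEnumerableIndexedProperty::emit(gen, dst, base, property);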
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& dst, VirtualRegister& base, VirtualRegister& property, unsigned __metadataID)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
        // FIXME: Wide16 is disabled for the Windows CLoop until this is fixed:
        // https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<OpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(dst)
&& Fits<VirtualRegister, __size>::check(base)
&& Fits<VirtualRegister, __size>::check(property)
&& Fits<unsigned, __size>::check(__metadataID)
&& (__size == OpcodeSize::Wide16 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister base, VirtualRegister property, unsigned __metadataID)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, dst, base, property, __metadataID)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide32));
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(dst));
gen->write(Fits<VirtualRegister, __size>::convert(base));
gen->write(Fits<VirtualRegister, __size>::convert(property));
gen->write(Fits<unsigned, __size>::convert(__metadataID));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**has_enumerable_indexed_property"[2 - __sizeShiftAmount]);
dumper->dumpOperand("dst", m_dst, true);
dumper->dumpOperand("base", m_base, false);
dumper->dumpOperand("property", m_property, false);
}
OpHasEnumerableIndexedProperty(const uint8_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_base(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[1]))
, m_property(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[2]))
, m_metadataID(Fits<unsigned, OpcodeSize::Narrow>::convert(stream[3]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpHasEnumerableIndexedProperty(const uint16_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_base(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[1]))
, m_property(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[2]))
, m_metadataID(Fits<unsigned, OpcodeSize::Wide16>::convert(stream[3]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpHasEnumerableIndexedProperty(const uint32_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_base(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[1]))
, m_property(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[2]))
, m_metadataID(Fits<unsigned, OpcodeSize::Wide32>::convert(stream[3]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static OpHasEnumerableIndexedProperty decode(const uint8_t* stream)
{
        // The constructor receives a pointer to the first operand (the prefix and opcode bytes are not included).
if (*stream == op_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == op_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setDst<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setDst<OpcodeSize::Wide16>(value, func);
else
setDst<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setBase(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setBase<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setBase<OpcodeSize::Wide16>(value, func);
else
setBase<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setBase(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setProperty(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setProperty<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setProperty<OpcodeSize::Wide16>(value, func);
else
setProperty<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setProperty(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 2 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
struct Metadata {
WTF_MAKE_NONCOPYABLE(Metadata);
public:
static constexpr OpcodeID opcodeID = op_has_enumerable_indexed_property;
Metadata(const OpHasEnumerableIndexedProperty&) { }
ArrayProfile m_arrayProfile;
};
Metadata& metadata(CodeBlock* codeBlock) const
{
return codeBlock->metadata<Metadata>(opcodeID, m_metadataID);
}
VirtualRegister m_dst;
VirtualRegister m_base;
VirtualRegister m_property;
unsigned m_metadataID;
};
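// Illustrative note, not generated output: op_has_enumerable_indexed_property
// carries an ArrayProfile rather than a ValueProfile, recording the indexing
// shapes of the bases it observes so the JIT tiers can specialize the indexed
// lookup, e.g.
//
//     ArrayProfile& profile = instruction->as<OpHasEnumerableIndexedProperty>().metadata(codeBlock).m_arrayProfile;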
struct OpBitxor : public Instruction {
static constexpr OpcodeID opcodeID = op_bitxor;
static constexpr size_t length = 5;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, dst, lhs, rhs);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
auto __metadataID = gen->addMetadataFor(opcodeID);
return emit<__size, BytecodeGenerator, shouldAssert>(gen, dst, lhs, rhs, __metadataID);
}
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkWithoutMetadataID(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
decltype(gen->addMetadataFor(opcodeID)) __metadataID { };
return checkImpl<__size, BytecodeGenerator>(gen, dst, lhs, rhs, __metadataID);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs, unsigned __metadataID)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, dst, lhs, rhs, __metadataID);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
auto __metadataID = gen->addMetadataFor(opcodeID);
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, dst, lhs, rhs, __metadataID))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, dst, lhs, rhs, __metadataID))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, dst, lhs, rhs, __metadataID);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& dst, VirtualRegister& lhs, VirtualRegister& rhs, unsigned __metadataID)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
        // FIXME: Wide16 is disabled for the Windows CLoop until this is fixed:
        // https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<OpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(dst)
&& Fits<VirtualRegister, __size>::check(lhs)
&& Fits<VirtualRegister, __size>::check(rhs)
&& Fits<unsigned, __size>::check(__metadataID)
&& (__size == OpcodeSize::Wide16 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs, unsigned __metadataID)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, dst, lhs, rhs, __metadataID)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide32));
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(dst));
gen->write(Fits<VirtualRegister, __size>::convert(lhs));
gen->write(Fits<VirtualRegister, __size>::convert(rhs));
gen->write(Fits<unsigned, __size>::convert(__metadataID));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**bitxor"[2 - __sizeShiftAmount]);
dumper->dumpOperand("dst", m_dst, true);
dumper->dumpOperand("lhs", m_lhs, false);
dumper->dumpOperand("rhs", m_rhs, false);
}
OpBitxor(const uint8_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_lhs(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[1]))
, m_rhs(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[2]))
, m_metadataID(Fits<unsigned, OpcodeSize::Narrow>::convert(stream[3]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpBitxor(const uint16_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_lhs(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[1]))
, m_rhs(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[2]))
, m_metadataID(Fits<unsigned, OpcodeSize::Wide16>::convert(stream[3]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpBitxor(const uint32_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_lhs(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[1]))
, m_rhs(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[2]))
, m_metadataID(Fits<unsigned, OpcodeSize::Wide32>::convert(stream[3]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static OpBitxor decode(const uint8_t* stream)
{
        // The constructor receives a pointer to the first operand (the prefix and opcode bytes are not included).
if (*stream == op_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == op_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setDst<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setDst<OpcodeSize::Wide16>(value, func);
else
setDst<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setLhs(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setLhs<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setLhs<OpcodeSize::Wide16>(value, func);
else
setLhs<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setLhs(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setRhs(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setRhs<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setRhs<OpcodeSize::Wide16>(value, func);
else
setRhs<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setRhs(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 2 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
struct Metadata {
WTF_MAKE_NONCOPYABLE(Metadata);
public:
static constexpr OpcodeID opcodeID = op_bitxor;
Metadata(const OpBitxor&) { }
ValueProfile m_profile;
};
Metadata& metadata(CodeBlock* codeBlock) const
{
return codeBlock->metadata<Metadata>(opcodeID, m_metadataID);
}
VirtualRegister m_dst;
VirtualRegister m_lhs;
VirtualRegister m_rhs;
unsigned m_metadataID;
};
struct OpLshift : public Instruction {
static constexpr OpcodeID opcodeID = op_lshift;
static constexpr size_t length = 5;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, dst, lhs, rhs);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
auto __metadataID = gen->addMetadataFor(opcodeID);
return emit<__size, BytecodeGenerator, shouldAssert>(gen, dst, lhs, rhs, __metadataID);
}
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkWithoutMetadataID(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
decltype(gen->addMetadataFor(opcodeID)) __metadataID { };
return checkImpl<__size, BytecodeGenerator>(gen, dst, lhs, rhs, __metadataID);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs, unsigned __metadataID)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, dst, lhs, rhs, __metadataID);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
auto __metadataID = gen->addMetadataFor(opcodeID);
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, dst, lhs, rhs, __metadataID))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, dst, lhs, rhs, __metadataID))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, dst, lhs, rhs, __metadataID);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& dst, VirtualRegister& lhs, VirtualRegister& rhs, unsigned __metadataID)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
        // FIXME: Wide16 is disabled for the Windows CLoop until this is fixed:
        // https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<OpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(dst)
&& Fits<VirtualRegister, __size>::check(lhs)
&& Fits<VirtualRegister, __size>::check(rhs)
&& Fits<unsigned, __size>::check(__metadataID)
&& (__size == OpcodeSize::Wide16 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs, unsigned __metadataID)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, dst, lhs, rhs, __metadataID)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide32));
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(dst));
gen->write(Fits<VirtualRegister, __size>::convert(lhs));
gen->write(Fits<VirtualRegister, __size>::convert(rhs));
gen->write(Fits<unsigned, __size>::convert(__metadataID));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**lshift"[2 - __sizeShiftAmount]);
dumper->dumpOperand("dst", m_dst, true);
dumper->dumpOperand("lhs", m_lhs, false);
dumper->dumpOperand("rhs", m_rhs, false);
}
OpLshift(const uint8_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_lhs(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[1]))
, m_rhs(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[2]))
, m_metadataID(Fits<unsigned, OpcodeSize::Narrow>::convert(stream[3]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpLshift(const uint16_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_lhs(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[1]))
, m_rhs(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[2]))
, m_metadataID(Fits<unsigned, OpcodeSize::Wide16>::convert(stream[3]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpLshift(const uint32_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_lhs(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[1]))
, m_rhs(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[2]))
, m_metadataID(Fits<unsigned, OpcodeSize::Wide32>::convert(stream[3]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static OpLshift decode(const uint8_t* stream)
{
        // The constructor receives a pointer to the first operand (the prefix and opcode bytes are not included).
if (*stream == op_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == op_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setDst<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setDst<OpcodeSize::Wide16>(value, func);
else
setDst<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setLhs(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setLhs<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setLhs<OpcodeSize::Wide16>(value, func);
else
setLhs<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setLhs(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setRhs(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setRhs<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setRhs<OpcodeSize::Wide16>(value, func);
else
setRhs<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setRhs(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 2 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
struct Metadata {
WTF_MAKE_NONCOPYABLE(Metadata);
public:
static constexpr OpcodeID opcodeID = op_lshift;
Metadata(const OpLshift&) { }
ValueProfile m_profile;
};
Metadata& metadata(CodeBlock* codeBlock) const
{
return codeBlock->metadata<Metadata>(opcodeID, m_metadataID);
}
VirtualRegister m_dst;
VirtualRegister m_lhs;
VirtualRegister m_rhs;
unsigned m_metadataID;
};
struct OpRshift : public Instruction {
static constexpr OpcodeID opcodeID = op_rshift;
static constexpr size_t length = 5;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, dst, lhs, rhs);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
auto __metadataID = gen->addMetadataFor(opcodeID);
return emit<__size, BytecodeGenerator, shouldAssert>(gen, dst, lhs, rhs, __metadataID);
}
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkWithoutMetadataID(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
decltype(gen->addMetadataFor(opcodeID)) __metadataID { };
return checkImpl<__size, BytecodeGenerator>(gen, dst, lhs, rhs, __metadataID);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs, unsigned __metadataID)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, dst, lhs, rhs, __metadataID);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
auto __metadataID = gen->addMetadataFor(opcodeID);
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, dst, lhs, rhs, __metadataID))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, dst, lhs, rhs, __metadataID))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, dst, lhs, rhs, __metadataID);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& dst, VirtualRegister& lhs, VirtualRegister& rhs, unsigned __metadataID)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
        // FIXME: Wide16 is disabled for the Windows CLoop until this is fixed:
        // https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<OpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(dst)
&& Fits<VirtualRegister, __size>::check(lhs)
&& Fits<VirtualRegister, __size>::check(rhs)
&& Fits<unsigned, __size>::check(__metadataID)
&& (__size == OpcodeSize::Wide16 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs, unsigned __metadataID)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, dst, lhs, rhs, __metadataID)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide32));
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(dst));
gen->write(Fits<VirtualRegister, __size>::convert(lhs));
gen->write(Fits<VirtualRegister, __size>::convert(rhs));
gen->write(Fits<unsigned, __size>::convert(__metadataID));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**rshift"[2 - __sizeShiftAmount]);
dumper->dumpOperand("dst", m_dst, true);
dumper->dumpOperand("lhs", m_lhs, false);
dumper->dumpOperand("rhs", m_rhs, false);
}
OpRshift(const uint8_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_lhs(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[1]))
, m_rhs(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[2]))
, m_metadataID(Fits<unsigned, OpcodeSize::Narrow>::convert(stream[3]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpRshift(const uint16_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_lhs(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[1]))
, m_rhs(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[2]))
, m_metadataID(Fits<unsigned, OpcodeSize::Wide16>::convert(stream[3]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpRshift(const uint32_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_lhs(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[1]))
, m_rhs(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[2]))
, m_metadataID(Fits<unsigned, OpcodeSize::Wide32>::convert(stream[3]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static OpRshift decode(const uint8_t* stream)
{
        // The constructor receives a pointer to the first operand (the prefix and opcode bytes are not included).
if (*stream == op_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == op_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setDst<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setDst<OpcodeSize::Wide16>(value, func);
else
setDst<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setLhs(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setLhs<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setLhs<OpcodeSize::Wide16>(value, func);
else
setLhs<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setLhs(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setRhs(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setRhs<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setRhs<OpcodeSize::Wide16>(value, func);
else
setRhs<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setRhs(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 2 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
struct Metadata {
WTF_MAKE_NONCOPYABLE(Metadata);
public:
static constexpr OpcodeID opcodeID = op_rshift;
Metadata(const OpRshift&) { }
ValueProfile m_profile;
};
Metadata& metadata(CodeBlock* codeBlock) const
{
return codeBlock->metadata<Metadata>(opcodeID, m_metadataID);
}
VirtualRegister m_dst;
VirtualRegister m_lhs;
VirtualRegister m_rhs;
unsigned m_metadataID;
};
struct OpBitnot : public Instruction {
static constexpr OpcodeID opcodeID = op_bitnot;
static constexpr size_t length = 4;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister operand)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, dst, operand);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister operand)
{
auto __metadataID = gen->addMetadataFor(opcodeID);
return emit<__size, BytecodeGenerator, shouldAssert>(gen, dst, operand, __metadataID);
}
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkWithoutMetadataID(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister operand)
{
decltype(gen->addMetadataFor(opcodeID)) __metadataID { };
return checkImpl<__size, BytecodeGenerator>(gen, dst, operand, __metadataID);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister operand, unsigned __metadataID)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, dst, operand, __metadataID);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister operand)
{
auto __metadataID = gen->addMetadataFor(opcodeID);
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, dst, operand, __metadataID))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, dst, operand, __metadataID))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, dst, operand, __metadataID);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& dst, VirtualRegister& operand, unsigned __metadataID)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
        // FIXME: Wide16 is disabled for the Windows CLoop until this is fixed:
        // https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<OpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(dst)
&& Fits<VirtualRegister, __size>::check(operand)
&& Fits<unsigned, __size>::check(__metadataID)
&& (__size == OpcodeSize::Wide16 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister operand, unsigned __metadataID)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, dst, operand, __metadataID)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide32));
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(dst));
gen->write(Fits<VirtualRegister, __size>::convert(operand));
gen->write(Fits<unsigned, __size>::convert(__metadataID));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**bitnot"[2 - __sizeShiftAmount]);
dumper->dumpOperand("dst", m_dst, true);
dumper->dumpOperand("operand", m_operand, false);
}
OpBitnot(const uint8_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_operand(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[1]))
, m_metadataID(Fits<unsigned, OpcodeSize::Narrow>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpBitnot(const uint16_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_operand(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[1]))
, m_metadataID(Fits<unsigned, OpcodeSize::Wide16>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpBitnot(const uint32_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_operand(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[1]))
, m_metadataID(Fits<unsigned, OpcodeSize::Wide32>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static OpBitnot decode(const uint8_t* stream)
{
        // The constructor receives a pointer to the first operand (the prefix and opcode bytes are not included).
if (*stream == op_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == op_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setDst<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setDst<OpcodeSize::Wide16>(value, func);
else
setDst<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setOperand(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setOperand<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setOperand<OpcodeSize::Wide16>(value, func);
else
setOperand<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setOperand(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
struct Metadata {
WTF_MAKE_NONCOPYABLE(Metadata);
public:
static constexpr OpcodeID opcodeID = op_bitnot;
Metadata(const OpBitnot&) { }
ValueProfile m_profile;
};
Metadata& metadata(CodeBlock* codeBlock) const
{
return codeBlock->metadata<Metadata>(opcodeID, m_metadataID);
}
VirtualRegister m_dst;
VirtualRegister m_operand;
unsigned m_metadataID;
};
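// Illustrative note, not generated output: the set* members patch an operand of
// an already-emitted instruction in place. If the new value does not fit the
// instruction's current width, the caller-supplied functor provides a fallback
// value that must fit. A hedged sketch, with `op` assumed to overlay the
// encoded op_bitnot in the instruction stream:
//
//     op->setOperand(replacement, [&]() -> VirtualRegister {
//         return fallbackRegister; // hypothetical value known to fit
//     });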
struct OpGetDirectPname : public Instruction {
static constexpr OpcodeID opcodeID = op_get_direct_pname;
static constexpr size_t length = 7;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister base, VirtualRegister property, VirtualRegister index, VirtualRegister enumerator)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, dst, base, property, index, enumerator);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister base, VirtualRegister property, VirtualRegister index, VirtualRegister enumerator)
{
auto __metadataID = gen->addMetadataFor(opcodeID);
return emit<__size, BytecodeGenerator, shouldAssert>(gen, dst, base, property, index, enumerator, __metadataID);
}
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkWithoutMetadataID(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister base, VirtualRegister property, VirtualRegister index, VirtualRegister enumerator)
{
decltype(gen->addMetadataFor(opcodeID)) __metadataID { };
return checkImpl<__size, BytecodeGenerator>(gen, dst, base, property, index, enumerator, __metadataID);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister base, VirtualRegister property, VirtualRegister index, VirtualRegister enumerator, unsigned __metadataID)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, dst, base, property, index, enumerator, __metadataID);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister base, VirtualRegister property, VirtualRegister index, VirtualRegister enumerator)
{
auto __metadataID = gen->addMetadataFor(opcodeID);
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, dst, base, property, index, enumerator, __metadataID))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, dst, base, property, index, enumerator, __metadataID))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, dst, base, property, index, enumerator, __metadataID);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& dst, VirtualRegister& base, VirtualRegister& property, VirtualRegister& index, VirtualRegister& enumerator, unsigned __metadataID)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
        // FIXME: Wide16 is disabled for the Windows CLoop until this is fixed:
        // https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<OpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(dst)
&& Fits<VirtualRegister, __size>::check(base)
&& Fits<VirtualRegister, __size>::check(property)
&& Fits<VirtualRegister, __size>::check(index)
&& Fits<VirtualRegister, __size>::check(enumerator)
&& Fits<unsigned, __size>::check(__metadataID)
&& (__size == OpcodeSize::Wide16 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister base, VirtualRegister property, VirtualRegister index, VirtualRegister enumerator, unsigned __metadataID)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, dst, base, property, index, enumerator, __metadataID)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide32));
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(dst));
gen->write(Fits<VirtualRegister, __size>::convert(base));
gen->write(Fits<VirtualRegister, __size>::convert(property));
gen->write(Fits<VirtualRegister, __size>::convert(index));
gen->write(Fits<VirtualRegister, __size>::convert(enumerator));
gen->write(Fits<unsigned, __size>::convert(__metadataID));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**get_direct_pname"[2 - __sizeShiftAmount]);
dumper->dumpOperand("dst", m_dst, true);
dumper->dumpOperand("base", m_base, false);
dumper->dumpOperand("property", m_property, false);
dumper->dumpOperand("index", m_index, false);
dumper->dumpOperand("enumerator", m_enumerator, false);
}
OpGetDirectPname(const uint8_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_base(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[1]))
, m_property(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[2]))
, m_index(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[3]))
, m_enumerator(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[4]))
, m_metadataID(Fits<unsigned, OpcodeSize::Narrow>::convert(stream[5]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpGetDirectPname(const uint16_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_base(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[1]))
, m_property(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[2]))
, m_index(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[3]))
, m_enumerator(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[4]))
, m_metadataID(Fits<unsigned, OpcodeSize::Wide16>::convert(stream[5]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpGetDirectPname(const uint32_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_base(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[1]))
, m_property(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[2]))
, m_index(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[3]))
, m_enumerator(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[4]))
, m_metadataID(Fits<unsigned, OpcodeSize::Wide32>::convert(stream[5]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static OpGetDirectPname decode(const uint8_t* stream)
{
        // The constructor receives a pointer to the first operand (the prefix and opcode bytes are not included).
if (*stream == op_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == op_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setDst<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setDst<OpcodeSize::Wide16>(value, func);
else
setDst<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setBase(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setBase<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setBase<OpcodeSize::Wide16>(value, func);
else
setBase<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setBase(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setProperty(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setProperty<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setProperty<OpcodeSize::Wide16>(value, func);
else
setProperty<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setProperty(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 2 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setIndex(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setIndex<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setIndex<OpcodeSize::Wide16>(value, func);
else
setIndex<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setIndex(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 3 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setEnumerator(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setEnumerator<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setEnumerator<OpcodeSize::Wide16>(value, func);
else
setEnumerator<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setEnumerator(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 4 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
struct Metadata {
WTF_MAKE_NONCOPYABLE(Metadata);
public:
static constexpr OpcodeID opcodeID = op_get_direct_pname;
Metadata(const OpGetDirectPname&) { }
ValueProfile m_profile;
};
Metadata& metadata(CodeBlock* codeBlock) const
{
return codeBlock->metadata<Metadata>(opcodeID, m_metadataID);
}
VirtualRegister m_dst;
VirtualRegister m_base;
VirtualRegister m_property;
VirtualRegister m_index;
VirtualRegister m_enumerator;
unsigned m_metadataID;
};
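// Illustrative note, not generated output: unlike the register-only ops above,
// op_new_array mixes operand kinds: two VirtualRegisters (dst, argv), a raw
// unsigned element count (argc), and an IndexingType allocation hint; the same
// Fits<T, size> machinery sizes and writes each one. A hedged emission sketch
// (gen and hint are placeholders):
//
//     OpNewArray::emit(gen, dst, argv, /* argc */ 3, /* recommendedIndexingType */ hint);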
struct OpNewArray : public Instruction {
static constexpr OpcodeID opcodeID = op_new_array;
static constexpr size_t length = 6;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister argv, unsigned argc, IndexingType recommendedIndexingType)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, dst, argv, argc, recommendedIndexingType);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister argv, unsigned argc, IndexingType recommendedIndexingType)
{
auto __metadataID = gen->addMetadataFor(opcodeID);
return emit<__size, BytecodeGenerator, shouldAssert>(gen, dst, argv, argc, recommendedIndexingType, __metadataID);
}
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkWithoutMetadataID(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister argv, unsigned argc, IndexingType recommendedIndexingType)
{
decltype(gen->addMetadataFor(opcodeID)) __metadataID { };
return checkImpl<__size, BytecodeGenerator>(gen, dst, argv, argc, recommendedIndexingType, __metadataID);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister argv, unsigned argc, IndexingType recommendedIndexingType, unsigned __metadataID)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, dst, argv, argc, recommendedIndexingType, __metadataID);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister argv, unsigned argc, IndexingType recommendedIndexingType)
{
auto __metadataID = gen->addMetadataFor(opcodeID);
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, dst, argv, argc, recommendedIndexingType, __metadataID))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, dst, argv, argc, recommendedIndexingType, __metadataID))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, dst, argv, argc, recommendedIndexingType, __metadataID);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& dst, VirtualRegister& argv, unsigned& argc, IndexingType& recommendedIndexingType, unsigned __metadataID)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
        // FIXME: Wide16 is disabled for the Windows CLoop until this is fixed:
        // https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<OpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(dst)
&& Fits<VirtualRegister, __size>::check(argv)
&& Fits<unsigned, __size>::check(argc)
&& Fits<IndexingType, __size>::check(recommendedIndexingType)
&& Fits<unsigned, __size>::check(__metadataID)
&& (__size == OpcodeSize::Wide16 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister argv, unsigned argc, IndexingType recommendedIndexingType, unsigned __metadataID)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, dst, argv, argc, recommendedIndexingType, __metadataID)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide32));
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(dst));
gen->write(Fits<VirtualRegister, __size>::convert(argv));
gen->write(Fits<unsigned, __size>::convert(argc));
gen->write(Fits<IndexingType, __size>::convert(recommendedIndexingType));
gen->write(Fits<unsigned, __size>::convert(__metadataID));
return true;
}
return false;
}
public:
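    // __sizeShiftAmount is 0/1/2 for narrow/wide16/wide32, so indexing "**new_array" at
    // 2 - __sizeShiftAmount keeps zero, one, or two leading '*'s in the dumped name.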
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**new_array"[2 - __sizeShiftAmount]);
dumper->dumpOperand("dst", m_dst, true);
dumper->dumpOperand("argv", m_argv, false);
dumper->dumpOperand("argc", m_argc, false);
dumper->dumpOperand("recommendedIndexingType", m_recommendedIndexingType, false);
}
OpNewArray(const uint8_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_argv(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[1]))
, m_argc(Fits<unsigned, OpcodeSize::Narrow>::convert(stream[2]))
, m_recommendedIndexingType(Fits<IndexingType, OpcodeSize::Narrow>::convert(stream[3]))
, m_metadataID(Fits<unsigned, OpcodeSize::Narrow>::convert(stream[4]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpNewArray(const uint16_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_argv(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[1]))
, m_argc(Fits<unsigned, OpcodeSize::Wide16>::convert(stream[2]))
, m_recommendedIndexingType(Fits<IndexingType, OpcodeSize::Wide16>::convert(stream[3]))
, m_metadataID(Fits<unsigned, OpcodeSize::Wide16>::convert(stream[4]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpNewArray(const uint32_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_argv(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[1]))
, m_argc(Fits<unsigned, OpcodeSize::Wide32>::convert(stream[2]))
, m_recommendedIndexingType(Fits<IndexingType, OpcodeSize::Wide32>::convert(stream[3]))
, m_metadataID(Fits<unsigned, OpcodeSize::Wide32>::convert(stream[4]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static OpNewArray decode(const uint8_t* stream)
{
        // Dispatch on the optional wide prefix; the constructor receives a pointer to the first operand (opcode and prefix excluded).
if (*stream == op_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == op_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
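    // Each operand has a setter pair: the unsized one dispatches on this instruction's
    // current width, and the sized one patches the operand in place at
    // this + operandIndex * size + prefix padding + 1 opcode byte. When the new value does
    // not fit the current encoding, func() must return a substitute value that does fit
    // (callers are expected to stash the real value elsewhere, e.g. via the rewriter).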
template<typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setDst<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setDst<OpcodeSize::Wide16>(value, func);
else
setDst<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setArgv(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setArgv<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setArgv<OpcodeSize::Wide16>(value, func);
else
setArgv<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setArgv(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setArgc(unsigned value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setArgc<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setArgc<OpcodeSize::Wide16>(value, func);
else
setArgc<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setArgc(unsigned value, Functor func)
{
if (!Fits<unsigned, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 2 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<unsigned, size>::convert(value);
}
template<typename Functor>
void setRecommendedIndexingType(IndexingType value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setRecommendedIndexingType<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setRecommendedIndexingType<OpcodeSize::Wide16>(value, func);
else
setRecommendedIndexingType<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setRecommendedIndexingType(IndexingType value, Functor func)
{
if (!Fits<IndexingType, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 3 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<IndexingType, size>::convert(value);
}
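    // Metadata lives out of line in the CodeBlock's metadata table, keyed by opcodeID and
    // m_metadataID; for op_new_array it carries the ArrayAllocationProfile that informs
    // the recommended indexing type of future allocations.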
struct Metadata {
WTF_MAKE_NONCOPYABLE(Metadata);
public:
static constexpr OpcodeID opcodeID = op_new_array;
Metadata(const OpNewArray&) { }
ArrayAllocationProfile m_arrayAllocationProfile;
};
Metadata& metadata(CodeBlock* codeBlock) const
{
return codeBlock->metadata<Metadata>(opcodeID, m_metadataID);
}
VirtualRegister m_dst;
VirtualRegister m_argv;
unsigned m_argc;
IndexingType m_recommendedIndexingType;
unsigned m_metadataID;
};
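// op_put_by_id: base.<id> = value, where `property` indexes the identifier table and
// `flags` carries the direct/strict-mode bits; the metadata caches a structure transition
// (old/new StructureID, storage offset, prototype chain) for the put_by_id inline cache.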
struct OpPutById : public Instruction {
static constexpr OpcodeID opcodeID = op_put_by_id;
static constexpr size_t length = 6;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister base, unsigned property, VirtualRegister value, PutByIdFlags flags)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, base, property, value, flags);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert>
static bool emit(BytecodeGenerator* gen, VirtualRegister base, unsigned property, VirtualRegister value, PutByIdFlags flags)
{
auto __metadataID = gen->addMetadataFor(opcodeID);
return emit<__size, BytecodeGenerator, shouldAssert>(gen, base, property, value, flags, __metadataID);
}
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkWithoutMetadataID(BytecodeGenerator* gen, VirtualRegister base, unsigned property, VirtualRegister value, PutByIdFlags flags)
{
decltype(gen->addMetadataFor(opcodeID)) __metadataID { };
return checkImpl<__size, BytecodeGenerator>(gen, base, property, value, flags, __metadataID);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister base, unsigned property, VirtualRegister value, PutByIdFlags flags, unsigned __metadataID)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, base, property, value, flags, __metadataID);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister base, unsigned property, VirtualRegister value, PutByIdFlags flags)
{
auto __metadataID = gen->addMetadataFor(opcodeID);
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, base, property, value, flags, __metadataID))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, base, property, value, flags, __metadataID))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, base, property, value, flags, __metadataID);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& base, unsigned& property, VirtualRegister& value, PutByIdFlags& flags, unsigned __metadataID)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
        // FIXME: Wide16 is disabled to work around a Windows CLoop issue; remove this workaround once fixed:
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<OpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(base)
&& Fits<unsigned, __size>::check(property)
&& Fits<VirtualRegister, __size>::check(value)
&& Fits<PutByIdFlags, __size>::check(flags)
&& Fits<unsigned, __size>::check(__metadataID)
&& (__size == OpcodeSize::Wide16 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister base, unsigned property, VirtualRegister value, PutByIdFlags flags, unsigned __metadataID)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, base, property, value, flags, __metadataID)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide32));
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(base));
gen->write(Fits<unsigned, __size>::convert(property));
gen->write(Fits<VirtualRegister, __size>::convert(value));
gen->write(Fits<PutByIdFlags, __size>::convert(flags));
gen->write(Fits<unsigned, __size>::convert(__metadataID));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**put_by_id"[2 - __sizeShiftAmount]);
dumper->dumpOperand("base", m_base, true);
dumper->dumpOperand("property", m_property, false);
dumper->dumpOperand("value", m_value, false);
dumper->dumpOperand("flags", m_flags, false);
}
OpPutById(const uint8_t* stream)
: m_base(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_property(Fits<unsigned, OpcodeSize::Narrow>::convert(stream[1]))
, m_value(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[2]))
, m_flags(Fits<PutByIdFlags, OpcodeSize::Narrow>::convert(stream[3]))
, m_metadataID(Fits<unsigned, OpcodeSize::Narrow>::convert(stream[4]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpPutById(const uint16_t* stream)
: m_base(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_property(Fits<unsigned, OpcodeSize::Wide16>::convert(stream[1]))
, m_value(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[2]))
, m_flags(Fits<PutByIdFlags, OpcodeSize::Wide16>::convert(stream[3]))
, m_metadataID(Fits<unsigned, OpcodeSize::Wide16>::convert(stream[4]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpPutById(const uint32_t* stream)
: m_base(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_property(Fits<unsigned, OpcodeSize::Wide32>::convert(stream[1]))
, m_value(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[2]))
, m_flags(Fits<PutByIdFlags, OpcodeSize::Wide32>::convert(stream[3]))
, m_metadataID(Fits<unsigned, OpcodeSize::Wide32>::convert(stream[4]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static OpPutById decode(const uint8_t* stream)
{
        // Dispatch on the optional wide prefix; the constructor receives a pointer to the first operand (opcode and prefix excluded).
if (*stream == op_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == op_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setBase(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setBase<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setBase<OpcodeSize::Wide16>(value, func);
else
setBase<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setBase(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setProperty(unsigned value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setProperty<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setProperty<OpcodeSize::Wide16>(value, func);
else
setProperty<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setProperty(unsigned value, Functor func)
{
if (!Fits<unsigned, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<unsigned, size>::convert(value);
}
template<typename Functor>
void setValue(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setValue<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setValue<OpcodeSize::Wide16>(value, func);
else
setValue<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setValue(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 2 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setFlags(PutByIdFlags value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setFlags<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setFlags<OpcodeSize::Wide16>(value, func);
else
setFlags<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setFlags(PutByIdFlags value, Functor func)
{
if (!Fits<PutByIdFlags, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 3 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<PutByIdFlags, size>::convert(value);
}
struct Metadata {
WTF_MAKE_NONCOPYABLE(Metadata);
public:
static constexpr OpcodeID opcodeID = op_put_by_id;
Metadata(const OpPutById&) { }
StructureID m_oldStructureID;
unsigned m_offset;
StructureID m_newStructureID;
WriteBarrierBase<StructureChain> m_structureChain;
};
Metadata& metadata(CodeBlock* codeBlock) const
{
return codeBlock->metadata<Metadata>(opcodeID, m_metadataID);
}
VirtualRegister m_base;
unsigned m_property;
VirtualRegister m_value;
PutByIdFlags m_flags;
unsigned m_metadataID;
};
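// op_in_by_val: dst = (property in base), the `in` operator with a computed key; the
// metadata's ArrayProfile records the indexing shapes seen for the base.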
struct OpInByVal : public Instruction {
static constexpr OpcodeID opcodeID = op_in_by_val;
static constexpr size_t length = 5;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister base, VirtualRegister property)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, dst, base, property);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister base, VirtualRegister property)
{
auto __metadataID = gen->addMetadataFor(opcodeID);
return emit<__size, BytecodeGenerator, shouldAssert>(gen, dst, base, property, __metadataID);
}
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkWithoutMetadataID(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister base, VirtualRegister property)
{
decltype(gen->addMetadataFor(opcodeID)) __metadataID { };
return checkImpl<__size, BytecodeGenerator>(gen, dst, base, property, __metadataID);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister base, VirtualRegister property, unsigned __metadataID)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, dst, base, property, __metadataID);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister base, VirtualRegister property)
{
auto __metadataID = gen->addMetadataFor(opcodeID);
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, dst, base, property, __metadataID))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, dst, base, property, __metadataID))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, dst, base, property, __metadataID);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& dst, VirtualRegister& base, VirtualRegister& property, unsigned __metadataID)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
        // FIXME: Wide16 is disabled to work around a Windows CLoop issue; remove this workaround once fixed:
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<OpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(dst)
&& Fits<VirtualRegister, __size>::check(base)
&& Fits<VirtualRegister, __size>::check(property)
&& Fits<unsigned, __size>::check(__metadataID)
&& (__size == OpcodeSize::Wide16 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister base, VirtualRegister property, unsigned __metadataID)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, dst, base, property, __metadataID)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide32));
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(dst));
gen->write(Fits<VirtualRegister, __size>::convert(base));
gen->write(Fits<VirtualRegister, __size>::convert(property));
gen->write(Fits<unsigned, __size>::convert(__metadataID));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**in_by_val"[2 - __sizeShiftAmount]);
dumper->dumpOperand("dst", m_dst, true);
dumper->dumpOperand("base", m_base, false);
dumper->dumpOperand("property", m_property, false);
}
OpInByVal(const uint8_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_base(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[1]))
, m_property(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[2]))
, m_metadataID(Fits<unsigned, OpcodeSize::Narrow>::convert(stream[3]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpInByVal(const uint16_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_base(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[1]))
, m_property(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[2]))
, m_metadataID(Fits<unsigned, OpcodeSize::Wide16>::convert(stream[3]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpInByVal(const uint32_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_base(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[1]))
, m_property(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[2]))
, m_metadataID(Fits<unsigned, OpcodeSize::Wide32>::convert(stream[3]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static OpInByVal decode(const uint8_t* stream)
{
        // Dispatch on the optional wide prefix; the constructor receives a pointer to the first operand (opcode and prefix excluded).
if (*stream == op_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == op_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setDst<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setDst<OpcodeSize::Wide16>(value, func);
else
setDst<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setBase(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setBase<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setBase<OpcodeSize::Wide16>(value, func);
else
setBase<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setBase(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setProperty(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setProperty<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setProperty<OpcodeSize::Wide16>(value, func);
else
setProperty<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setProperty(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 2 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
struct Metadata {
WTF_MAKE_NONCOPYABLE(Metadata);
public:
static constexpr OpcodeID opcodeID = op_in_by_val;
Metadata(const OpInByVal&) { }
ArrayProfile m_arrayProfile;
};
Metadata& metadata(CodeBlock* codeBlock) const
{
return codeBlock->metadata<Metadata>(opcodeID, m_metadataID);
}
VirtualRegister m_dst;
VirtualRegister m_base;
VirtualRegister m_property;
unsigned m_metadataID;
};
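// op_new_array_buffer: materializes an array literal from a precompiled immutable
// butterfly constant, allocating with the recommended indexing type.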
struct OpNewArrayBuffer : public Instruction {
static constexpr OpcodeID opcodeID = op_new_array_buffer;
static constexpr size_t length = 5;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister immutableButterfly, IndexingType recommendedIndexingType)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, dst, immutableButterfly, recommendedIndexingType);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister immutableButterfly, IndexingType recommendedIndexingType)
{
auto __metadataID = gen->addMetadataFor(opcodeID);
return emit<__size, BytecodeGenerator, shouldAssert>(gen, dst, immutableButterfly, recommendedIndexingType, __metadataID);
}
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkWithoutMetadataID(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister immutableButterfly, IndexingType recommendedIndexingType)
{
decltype(gen->addMetadataFor(opcodeID)) __metadataID { };
return checkImpl<__size, BytecodeGenerator>(gen, dst, immutableButterfly, recommendedIndexingType, __metadataID);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister immutableButterfly, IndexingType recommendedIndexingType, unsigned __metadataID)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, dst, immutableButterfly, recommendedIndexingType, __metadataID);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister immutableButterfly, IndexingType recommendedIndexingType)
{
auto __metadataID = gen->addMetadataFor(opcodeID);
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, dst, immutableButterfly, recommendedIndexingType, __metadataID))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, dst, immutableButterfly, recommendedIndexingType, __metadataID))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, dst, immutableButterfly, recommendedIndexingType, __metadataID);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& dst, VirtualRegister& immutableButterfly, IndexingType& recommendedIndexingType, unsigned __metadataID)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
        // FIXME: Wide16 is disabled to work around a Windows CLoop issue; remove this workaround once fixed:
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<OpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(dst)
&& Fits<VirtualRegister, __size>::check(immutableButterfly)
&& Fits<IndexingType, __size>::check(recommendedIndexingType)
&& Fits<unsigned, __size>::check(__metadataID)
&& (__size == OpcodeSize::Wide16 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister immutableButterfly, IndexingType recommendedIndexingType, unsigned __metadataID)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, dst, immutableButterfly, recommendedIndexingType, __metadataID)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide32));
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(dst));
gen->write(Fits<VirtualRegister, __size>::convert(immutableButterfly));
gen->write(Fits<IndexingType, __size>::convert(recommendedIndexingType));
gen->write(Fits<unsigned, __size>::convert(__metadataID));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**new_array_buffer"[2 - __sizeShiftAmount]);
dumper->dumpOperand("dst", m_dst, true);
dumper->dumpOperand("immutableButterfly", m_immutableButterfly, false);
dumper->dumpOperand("recommendedIndexingType", m_recommendedIndexingType, false);
}
OpNewArrayBuffer(const uint8_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_immutableButterfly(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[1]))
, m_recommendedIndexingType(Fits<IndexingType, OpcodeSize::Narrow>::convert(stream[2]))
, m_metadataID(Fits<unsigned, OpcodeSize::Narrow>::convert(stream[3]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpNewArrayBuffer(const uint16_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_immutableButterfly(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[1]))
, m_recommendedIndexingType(Fits<IndexingType, OpcodeSize::Wide16>::convert(stream[2]))
, m_metadataID(Fits<unsigned, OpcodeSize::Wide16>::convert(stream[3]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpNewArrayBuffer(const uint32_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_immutableButterfly(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[1]))
, m_recommendedIndexingType(Fits<IndexingType, OpcodeSize::Wide32>::convert(stream[2]))
, m_metadataID(Fits<unsigned, OpcodeSize::Wide32>::convert(stream[3]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static OpNewArrayBuffer decode(const uint8_t* stream)
{
        // Dispatch on the optional wide prefix; the constructor receives a pointer to the first operand (opcode and prefix excluded).
if (*stream == op_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == op_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setDst<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setDst<OpcodeSize::Wide16>(value, func);
else
setDst<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setImmutableButterfly(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setImmutableButterfly<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setImmutableButterfly<OpcodeSize::Wide16>(value, func);
else
setImmutableButterfly<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setImmutableButterfly(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setRecommendedIndexingType(IndexingType value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setRecommendedIndexingType<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setRecommendedIndexingType<OpcodeSize::Wide16>(value, func);
else
setRecommendedIndexingType<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setRecommendedIndexingType(IndexingType value, Functor func)
{
if (!Fits<IndexingType, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 2 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<IndexingType, size>::convert(value);
}
struct Metadata {
WTF_MAKE_NONCOPYABLE(Metadata);
public:
static constexpr OpcodeID opcodeID = op_new_array_buffer;
Metadata(const OpNewArrayBuffer&) { }
ArrayAllocationProfile m_arrayAllocationProfile;
};
Metadata& metadata(CodeBlock* codeBlock) const
{
return codeBlock->metadata<Metadata>(opcodeID, m_metadataID);
}
VirtualRegister m_dst;
VirtualRegister m_immutableButterfly;
IndexingType m_recommendedIndexingType;
unsigned m_metadataID;
};
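// op_get_by_id: dst = base.<id>, where `property` indexes the identifier table; the
// metadata tracks the inline-cache mode plus a value profile for the loaded result.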
struct OpGetById : public Instruction {
static constexpr OpcodeID opcodeID = op_get_by_id;
static constexpr size_t length = 5;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister base, unsigned property)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, dst, base, property);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister base, unsigned property)
{
auto __metadataID = gen->addMetadataFor(opcodeID);
return emit<__size, BytecodeGenerator, shouldAssert>(gen, dst, base, property, __metadataID);
}
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkWithoutMetadataID(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister base, unsigned property)
{
decltype(gen->addMetadataFor(opcodeID)) __metadataID { };
return checkImpl<__size, BytecodeGenerator>(gen, dst, base, property, __metadataID);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister base, unsigned property, unsigned __metadataID)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, dst, base, property, __metadataID);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister base, unsigned property)
{
auto __metadataID = gen->addMetadataFor(opcodeID);
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, dst, base, property, __metadataID))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, dst, base, property, __metadataID))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, dst, base, property, __metadataID);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& dst, VirtualRegister& base, unsigned& property, unsigned __metadataID)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
        // FIXME: Wide16 is disabled to work around a Windows CLoop issue; remove this workaround once fixed:
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<OpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(dst)
&& Fits<VirtualRegister, __size>::check(base)
&& Fits<unsigned, __size>::check(property)
&& Fits<unsigned, __size>::check(__metadataID)
&& (__size == OpcodeSize::Wide16 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister base, unsigned property, unsigned __metadataID)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, dst, base, property, __metadataID)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide32));
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(dst));
gen->write(Fits<VirtualRegister, __size>::convert(base));
gen->write(Fits<unsigned, __size>::convert(property));
gen->write(Fits<unsigned, __size>::convert(__metadataID));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**get_by_id"[2 - __sizeShiftAmount]);
dumper->dumpOperand("dst", m_dst, true);
dumper->dumpOperand("base", m_base, false);
dumper->dumpOperand("property", m_property, false);
}
OpGetById(const uint8_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_base(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[1]))
, m_property(Fits<unsigned, OpcodeSize::Narrow>::convert(stream[2]))
, m_metadataID(Fits<unsigned, OpcodeSize::Narrow>::convert(stream[3]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpGetById(const uint16_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_base(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[1]))
, m_property(Fits<unsigned, OpcodeSize::Wide16>::convert(stream[2]))
, m_metadataID(Fits<unsigned, OpcodeSize::Wide16>::convert(stream[3]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpGetById(const uint32_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_base(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[1]))
, m_property(Fits<unsigned, OpcodeSize::Wide32>::convert(stream[2]))
, m_metadataID(Fits<unsigned, OpcodeSize::Wide32>::convert(stream[3]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static OpGetById decode(const uint8_t* stream)
{
        // Dispatch on the optional wide prefix; the constructor receives a pointer to the first operand (opcode and prefix excluded).
if (*stream == op_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == op_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setDst<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setDst<OpcodeSize::Wide16>(value, func);
else
setDst<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setBase(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setBase<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setBase<OpcodeSize::Wide16>(value, func);
else
setBase<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setBase(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setProperty(unsigned value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setProperty<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setProperty<OpcodeSize::Wide16>(value, func);
else
setProperty<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setProperty(unsigned value, Functor func)
{
if (!Fits<unsigned, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 2 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<unsigned, size>::convert(value);
}
struct Metadata {
WTF_MAKE_NONCOPYABLE(Metadata);
public:
static constexpr OpcodeID opcodeID = op_get_by_id;
Metadata(const OpGetById&) { }
GetByIdModeMetadata m_modeMetadata;
ValueProfile m_profile;
};
Metadata& metadata(CodeBlock* codeBlock) const
{
return codeBlock->metadata<Metadata>(opcodeID, m_metadataID);
}
VirtualRegister m_dst;
VirtualRegister m_base;
unsigned m_property;
unsigned m_metadataID;
};
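// op_get_internal_field: dst = base's fixed internal field at `index` (used by objects
// with internal slots, e.g. generators and promises); the metadata value-profiles the load.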
struct OpGetInternalField : public Instruction {
static constexpr OpcodeID opcodeID = op_get_internal_field;
static constexpr size_t length = 5;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister base, unsigned index)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, dst, base, index);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister base, unsigned index)
{
auto __metadataID = gen->addMetadataFor(opcodeID);
return emit<__size, BytecodeGenerator, shouldAssert>(gen, dst, base, index, __metadataID);
}
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkWithoutMetadataID(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister base, unsigned index)
{
decltype(gen->addMetadataFor(opcodeID)) __metadataID { };
return checkImpl<__size, BytecodeGenerator>(gen, dst, base, index, __metadataID);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister base, unsigned index, unsigned __metadataID)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, dst, base, index, __metadataID);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister base, unsigned index)
{
auto __metadataID = gen->addMetadataFor(opcodeID);
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, dst, base, index, __metadataID))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, dst, base, index, __metadataID))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, dst, base, index, __metadataID);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& dst, VirtualRegister& base, unsigned& index, unsigned __metadataID)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
        // FIXME: Wide16 is disabled to work around a Windows CLoop issue; remove this workaround once fixed:
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<OpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(dst)
&& Fits<VirtualRegister, __size>::check(base)
&& Fits<unsigned, __size>::check(index)
&& Fits<unsigned, __size>::check(__metadataID)
&& (__size == OpcodeSize::Wide16 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister base, unsigned index, unsigned __metadataID)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, dst, base, index, __metadataID)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide32));
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(dst));
gen->write(Fits<VirtualRegister, __size>::convert(base));
gen->write(Fits<unsigned, __size>::convert(index));
gen->write(Fits<unsigned, __size>::convert(__metadataID));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**get_internal_field"[2 - __sizeShiftAmount]);
dumper->dumpOperand("dst", m_dst, true);
dumper->dumpOperand("base", m_base, false);
dumper->dumpOperand("index", m_index, false);
}
OpGetInternalField(const uint8_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_base(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[1]))
, m_index(Fits<unsigned, OpcodeSize::Narrow>::convert(stream[2]))
, m_metadataID(Fits<unsigned, OpcodeSize::Narrow>::convert(stream[3]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpGetInternalField(const uint16_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_base(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[1]))
, m_index(Fits<unsigned, OpcodeSize::Wide16>::convert(stream[2]))
, m_metadataID(Fits<unsigned, OpcodeSize::Wide16>::convert(stream[3]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpGetInternalField(const uint32_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_base(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[1]))
, m_index(Fits<unsigned, OpcodeSize::Wide32>::convert(stream[2]))
, m_metadataID(Fits<unsigned, OpcodeSize::Wide32>::convert(stream[3]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static OpGetInternalField decode(const uint8_t* stream)
{
        // Dispatch on the optional wide prefix; the constructor receives a pointer to the first operand (opcode and prefix excluded).
if (*stream == op_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == op_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setDst<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setDst<OpcodeSize::Wide16>(value, func);
else
setDst<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setBase(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setBase<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setBase<OpcodeSize::Wide16>(value, func);
else
setBase<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setBase(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setIndex(unsigned value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setIndex<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setIndex<OpcodeSize::Wide16>(value, func);
else
setIndex<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setIndex(unsigned value, Functor func)
{
if (!Fits<unsigned, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 2 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<unsigned, size>::convert(value);
}
struct Metadata {
WTF_MAKE_NONCOPYABLE(Metadata);
public:
static constexpr OpcodeID opcodeID = op_get_internal_field;
Metadata(const OpGetInternalField&) { }
ValueProfile m_profile;
};
Metadata& metadata(CodeBlock* codeBlock) const
{
return codeBlock->metadata<Metadata>(opcodeID, m_metadataID);
}
VirtualRegister m_dst;
VirtualRegister m_base;
unsigned m_index;
unsigned m_metadataID;
};
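// op_to_this: normalizes srcDst into the |this| value for the given ECMAMode (in sloppy
// mode, primitives are boxed and undefined/null resolve to the global this); the metadata
// caches the resulting structure and status for fast repeat conversions.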
struct OpToThis : public Instruction {
static constexpr OpcodeID opcodeID = op_to_this;
static constexpr size_t length = 4;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister srcDst, ECMAMode ecmaMode)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, srcDst, ecmaMode);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert>
static bool emit(BytecodeGenerator* gen, VirtualRegister srcDst, ECMAMode ecmaMode)
{
auto __metadataID = gen->addMetadataFor(opcodeID);
return emit<__size, BytecodeGenerator, shouldAssert>(gen, srcDst, ecmaMode, __metadataID);
}
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkWithoutMetadataID(BytecodeGenerator* gen, VirtualRegister srcDst, ECMAMode ecmaMode)
{
decltype(gen->addMetadataFor(opcodeID)) __metadataID { };
return checkImpl<__size, BytecodeGenerator>(gen, srcDst, ecmaMode, __metadataID);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister srcDst, ECMAMode ecmaMode, unsigned __metadataID)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, srcDst, ecmaMode, __metadataID);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister srcDst, ECMAMode ecmaMode)
{
auto __metadataID = gen->addMetadataFor(opcodeID);
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, srcDst, ecmaMode, __metadataID))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, srcDst, ecmaMode, __metadataID))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, srcDst, ecmaMode, __metadataID);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& srcDst, ECMAMode& ecmaMode, unsigned __metadataID)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
        // FIXME: Wide16 is disabled to work around a Windows CLoop issue; remove this workaround once fixed:
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<OpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(srcDst)
&& Fits<ECMAMode, __size>::check(ecmaMode)
&& Fits<unsigned, __size>::check(__metadataID)
&& (__size == OpcodeSize::Wide16 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister srcDst, ECMAMode ecmaMode, unsigned __metadataID)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, srcDst, ecmaMode, __metadataID)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide32));
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(srcDst));
gen->write(Fits<ECMAMode, __size>::convert(ecmaMode));
gen->write(Fits<unsigned, __size>::convert(__metadataID));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**to_this"[2 - __sizeShiftAmount]);
dumper->dumpOperand("srcDst", m_srcDst, true);
dumper->dumpOperand("ecmaMode", m_ecmaMode, false);
}
OpToThis(const uint8_t* stream)
: m_srcDst(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_ecmaMode(Fits<ECMAMode, OpcodeSize::Narrow>::convert(stream[1]))
, m_metadataID(Fits<unsigned, OpcodeSize::Narrow>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpToThis(const uint16_t* stream)
: m_srcDst(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_ecmaMode(Fits<ECMAMode, OpcodeSize::Wide16>::convert(stream[1]))
, m_metadataID(Fits<unsigned, OpcodeSize::Wide16>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpToThis(const uint32_t* stream)
: m_srcDst(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_ecmaMode(Fits<ECMAMode, OpcodeSize::Wide32>::convert(stream[1]))
, m_metadataID(Fits<unsigned, OpcodeSize::Wide32>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static OpToThis decode(const uint8_t* stream)
{
        // Dispatch on the optional wide prefix; the constructor receives a pointer to the first operand (opcode and prefix excluded).
if (*stream == op_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == op_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setSrcDst(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setSrcDst<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setSrcDst<OpcodeSize::Wide16>(value, func);
else
setSrcDst<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setSrcDst(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setEcmaMode(ECMAMode value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setEcmaMode<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setEcmaMode<OpcodeSize::Wide16>(value, func);
else
setEcmaMode<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setEcmaMode(ECMAMode value, Functor func)
{
if (!Fits<ECMAMode, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<ECMAMode, size>::convert(value);
}
struct Metadata {
WTF_MAKE_NONCOPYABLE(Metadata);
public:
static constexpr OpcodeID opcodeID = op_to_this;
Metadata(const OpToThis&) { }
StructureID m_cachedStructureID;
ToThisStatus m_toThisStatus;
ValueProfile m_profile;
};
Metadata& metadata(CodeBlock* codeBlock) const
{
return codeBlock->metadata<Metadata>(opcodeID, m_metadataID);
}
VirtualRegister m_srcDst;
ECMAMode m_ecmaMode;
unsigned m_metadataID;
};
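// op_put_by_val: base[property] = value with a computed key, honoring ecmaMode for
// strict-mode error behavior.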
struct OpPutByVal : public Instruction {
static constexpr OpcodeID opcodeID = op_put_by_val;
static constexpr size_t length = 6;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister base, VirtualRegister property, VirtualRegister value, ECMAMode ecmaMode)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, base, property, value, ecmaMode);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert>
static bool emit(BytecodeGenerator* gen, VirtualRegister base, VirtualRegister property, VirtualRegister value, ECMAMode ecmaMode)
{
auto __metadataID = gen->addMetadataFor(opcodeID);
return emit<__size, BytecodeGenerator, shouldAssert>(gen, base, property, value, ecmaMode, __metadataID);
}
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkWithoutMetadataID(BytecodeGenerator* gen, VirtualRegister base, VirtualRegister property, VirtualRegister value, ECMAMode ecmaMode)
{
decltype(gen->addMetadataFor(opcodeID)) __metadataID { };
return checkImpl<__size, BytecodeGenerator>(gen, base, property, value, ecmaMode, __metadataID);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister base, VirtualRegister property, VirtualRegister value, ECMAMode ecmaMode, unsigned __metadataID)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, base, property, value, ecmaMode, __metadataID);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister base, VirtualRegister property, VirtualRegister value, ECMAMode ecmaMode)
{
auto __metadataID = gen->addMetadataFor(opcodeID);
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, base, property, value, ecmaMode, __metadataID))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, base, property, value, ecmaMode, __metadataID))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, base, property, value, ecmaMode, __metadataID);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& base, VirtualRegister& property, VirtualRegister& value, ECMAMode& ecmaMode, unsigned __metadataID)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<OpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(base)
&& Fits<VirtualRegister, __size>::check(property)
&& Fits<VirtualRegister, __size>::check(value)
&& Fits<ECMAMode, __size>::check(ecmaMode)
&& Fits<unsigned, __size>::check(__metadataID)
&& (__size == OpcodeSize::Wide16 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister base, VirtualRegister property, VirtualRegister value, ECMAMode ecmaMode, unsigned __metadataID)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, base, property, value, ecmaMode, __metadataID)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide32));
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(base));
gen->write(Fits<VirtualRegister, __size>::convert(property));
gen->write(Fits<VirtualRegister, __size>::convert(value));
gen->write(Fits<ECMAMode, __size>::convert(ecmaMode));
gen->write(Fits<unsigned, __size>::convert(__metadataID));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**put_by_val"[2 - __sizeShiftAmount]);
dumper->dumpOperand("base", m_base, true);
dumper->dumpOperand("property", m_property, false);
dumper->dumpOperand("value", m_value, false);
dumper->dumpOperand("ecmaMode", m_ecmaMode, false);
}
OpPutByVal(const uint8_t* stream)
: m_base(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_property(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[1]))
, m_value(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[2]))
, m_ecmaMode(Fits<ECMAMode, OpcodeSize::Narrow>::convert(stream[3]))
, m_metadataID(Fits<unsigned, OpcodeSize::Narrow>::convert(stream[4]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpPutByVal(const uint16_t* stream)
: m_base(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_property(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[1]))
, m_value(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[2]))
, m_ecmaMode(Fits<ECMAMode, OpcodeSize::Wide16>::convert(stream[3]))
, m_metadataID(Fits<unsigned, OpcodeSize::Wide16>::convert(stream[4]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpPutByVal(const uint32_t* stream)
: m_base(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_property(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[1]))
, m_value(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[2]))
, m_ecmaMode(Fits<ECMAMode, OpcodeSize::Wide32>::convert(stream[3]))
, m_metadataID(Fits<unsigned, OpcodeSize::Wide32>::convert(stream[4]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static OpPutByVal decode(const uint8_t* stream)
{
// The stream points at the start of the encoded instruction: the wide prefix if present, otherwise the opcode byte.
// The constructors above receive a pointer to the first operand (opcode and prefix are not included).
if (*stream == op_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == op_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setBase(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setBase<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setBase<OpcodeSize::Wide16>(value, func);
else
setBase<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setBase(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setProperty(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setProperty<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setProperty<OpcodeSize::Wide16>(value, func);
else
setProperty<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setProperty(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setValue(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setValue<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setValue<OpcodeSize::Wide16>(value, func);
else
setValue<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setValue(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 2 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setEcmaMode(ECMAMode value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setEcmaMode<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setEcmaMode<OpcodeSize::Wide16>(value, func);
else
setEcmaMode<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setEcmaMode(ECMAMode value, Functor func)
{
if (!Fits<ECMAMode, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 3 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<ECMAMode, size>::convert(value);
}
struct Metadata {
WTF_MAKE_NONCOPYABLE(Metadata);
public:
static constexpr OpcodeID opcodeID = op_put_by_val;
Metadata(const OpPutByVal&) { }
ArrayProfile m_arrayProfile;
};
Metadata& metadata(CodeBlock* codeBlock) const
{
return codeBlock->metadata<Metadata>(opcodeID, m_metadataID);
}
VirtualRegister m_base;
VirtualRegister m_property;
VirtualRegister m_value;
ECMAMode m_ecmaMode;
unsigned m_metadataID;
};
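// Illustrative sketch of the setter protocol, inferred from the generated
// setters above rather than documented anywhere: each setFoo() rewrites its
// operand in place at the instruction's current width and invokes the functor
// only when the new value does not fit that width. The functor is assumed to
// return a substitute value that does fit (for example, a register remapped by
// the caller):
//
//     bytecode.setValue(candidate, [&]() -> VirtualRegister {
//         return fallbackRegister; // hypothetical caller-provided fallback
//     });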
struct OpCreatePromise : public Instruction {
static constexpr OpcodeID opcodeID = op_create_promise;
static constexpr size_t length = 5;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister callee, bool isInternalPromise)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, dst, callee, isInternalPromise);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister callee, bool isInternalPromise)
{
auto __metadataID = gen->addMetadataFor(opcodeID);
return emit<__size, BytecodeGenerator, shouldAssert>(gen, dst, callee, isInternalPromise, __metadataID);
}
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkWithoutMetadataID(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister callee, bool isInternalPromise)
{
decltype(gen->addMetadataFor(opcodeID)) __metadataID { };
return checkImpl<__size, BytecodeGenerator>(gen, dst, callee, isInternalPromise, __metadataID);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister callee, bool isInternalPromise, unsigned __metadataID)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, dst, callee, isInternalPromise, __metadataID);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister callee, bool isInternalPromise)
{
auto __metadataID = gen->addMetadataFor(opcodeID);
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, dst, callee, isInternalPromise, __metadataID))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, dst, callee, isInternalPromise, __metadataID))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, dst, callee, isInternalPromise, __metadataID);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& dst, VirtualRegister& callee, bool& isInternalPromise, unsigned __metadataID)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<OpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(dst)
&& Fits<VirtualRegister, __size>::check(callee)
&& Fits<bool, __size>::check(isInternalPromise)
&& Fits<unsigned, __size>::check(__metadataID)
&& (__size == OpcodeSize::Wide16 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister callee, bool isInternalPromise, unsigned __metadataID)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, dst, callee, isInternalPromise, __metadataID)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide32));
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(dst));
gen->write(Fits<VirtualRegister, __size>::convert(callee));
gen->write(Fits<bool, __size>::convert(isInternalPromise));
gen->write(Fits<unsigned, __size>::convert(__metadataID));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**create_promise"[2 - __sizeShiftAmount]);
dumper->dumpOperand("dst", m_dst, true);
dumper->dumpOperand("callee", m_callee, false);
dumper->dumpOperand("isInternalPromise", m_isInternalPromise, false);
}
OpCreatePromise(const uint8_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_callee(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[1]))
, m_isInternalPromise(Fits<bool, OpcodeSize::Narrow>::convert(stream[2]))
, m_metadataID(Fits<unsigned, OpcodeSize::Narrow>::convert(stream[3]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpCreatePromise(const uint16_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_callee(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[1]))
, m_isInternalPromise(Fits<bool, OpcodeSize::Wide16>::convert(stream[2]))
, m_metadataID(Fits<unsigned, OpcodeSize::Wide16>::convert(stream[3]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpCreatePromise(const uint32_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_callee(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[1]))
, m_isInternalPromise(Fits<bool, OpcodeSize::Wide32>::convert(stream[2]))
, m_metadataID(Fits<unsigned, OpcodeSize::Wide32>::convert(stream[3]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static OpCreatePromise decode(const uint8_t* stream)
{
// The stream points at the start of the encoded instruction: the wide prefix if present, otherwise the opcode byte.
// The constructors above receive a pointer to the first operand (opcode and prefix are not included).
if (*stream == op_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == op_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setDst<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setDst<OpcodeSize::Wide16>(value, func);
else
setDst<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setCallee(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setCallee<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setCallee<OpcodeSize::Wide16>(value, func);
else
setCallee<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setCallee(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setIsInternalPromise(bool value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setIsInternalPromise<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setIsInternalPromise<OpcodeSize::Wide16>(value, func);
else
setIsInternalPromise<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setIsInternalPromise(bool value, Functor func)
{
if (!Fits<bool, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 2 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<bool, size>::convert(value);
}
struct Metadata {
WTF_MAKE_NONCOPYABLE(Metadata);
public:
static constexpr OpcodeID opcodeID = op_create_promise;
Metadata(const OpCreatePromise&) { }
WriteBarrier<JSCell> m_cachedCallee;
};
Metadata& metadata(CodeBlock* codeBlock) const
{
return codeBlock->metadata<Metadata>(opcodeID, m_metadataID);
}
VirtualRegister m_dst;
VirtualRegister m_callee;
bool m_isInternalPromise;
unsigned m_metadataID;
};
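// Sketch of the per-opcode metadata pattern: m_metadataID indexes into the
// owning CodeBlock's metadata table, keeping mutable profiling state out of
// the instruction stream itself. Assuming `codeBlock` is the owning
// CodeBlock*:
//
//     auto& metadata = bytecode.metadata(codeBlock);
//     // e.g. OpCreatePromise::Metadata::m_cachedCallee caches the callee
//     // cell across executions of this bytecode.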
struct OpCreateAsyncGenerator : public Instruction {
static constexpr OpcodeID opcodeID = op_create_async_generator;
static constexpr size_t length = 4;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister callee)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, dst, callee);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister callee)
{
auto __metadataID = gen->addMetadataFor(opcodeID);
return emit<__size, BytecodeGenerator, shouldAssert>(gen, dst, callee, __metadataID);
}
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkWithoutMetadataID(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister callee)
{
decltype(gen->addMetadataFor(opcodeID)) __metadataID { };
return checkImpl<__size, BytecodeGenerator>(gen, dst, callee, __metadataID);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister callee, unsigned __metadataID)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, dst, callee, __metadataID);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister callee)
{
auto __metadataID = gen->addMetadataFor(opcodeID);
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, dst, callee, __metadataID))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, dst, callee, __metadataID))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, dst, callee, __metadataID);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& dst, VirtualRegister& callee, unsigned __metadataID)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<OpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(dst)
&& Fits<VirtualRegister, __size>::check(callee)
&& Fits<unsigned, __size>::check(__metadataID)
&& (__size == OpcodeSize::Wide16 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister callee, unsigned __metadataID)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, dst, callee, __metadataID)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide32));
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(dst));
gen->write(Fits<VirtualRegister, __size>::convert(callee));
gen->write(Fits<unsigned, __size>::convert(__metadataID));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**create_async_generator"[2 - __sizeShiftAmount]);
dumper->dumpOperand("dst", m_dst, true);
dumper->dumpOperand("callee", m_callee, false);
}
OpCreateAsyncGenerator(const uint8_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_callee(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[1]))
, m_metadataID(Fits<unsigned, OpcodeSize::Narrow>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpCreateAsyncGenerator(const uint16_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_callee(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[1]))
, m_metadataID(Fits<unsigned, OpcodeSize::Wide16>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpCreateAsyncGenerator(const uint32_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_callee(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[1]))
, m_metadataID(Fits<unsigned, OpcodeSize::Wide32>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static OpCreateAsyncGenerator decode(const uint8_t* stream)
{
// The stream points at the start of the encoded instruction: the wide prefix if present, otherwise the opcode byte.
// The constructors above receive a pointer to the first operand (opcode and prefix are not included).
if (*stream == op_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == op_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setDst<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setDst<OpcodeSize::Wide16>(value, func);
else
setDst<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setCallee(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setCallee<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setCallee<OpcodeSize::Wide16>(value, func);
else
setCallee<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setCallee(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
struct Metadata {
WTF_MAKE_NONCOPYABLE(Metadata);
public:
static constexpr OpcodeID opcodeID = op_create_async_generator;
Metadata(const OpCreateAsyncGenerator&) { }
WriteBarrier<JSCell> m_cachedCallee;
};
Metadata& metadata(CodeBlock* codeBlock) const
{
return codeBlock->metadata<Metadata>(opcodeID, m_metadataID);
}
VirtualRegister m_dst;
VirtualRegister m_callee;
unsigned m_metadataID;
};
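// Note: emitWithSmallestSizeRequirement() tries Narrow, then Wide16, then
// Wide32, asserting only on the final Wide32 attempt. On Windows CLoop builds
// the Wide16 attempt always fails (see the #if in checkImpl()), so operands
// that overflow Narrow fall straight through to Wide32.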
struct OpGetByValWithThis : public Instruction {
static constexpr OpcodeID opcodeID = op_get_by_val_with_this;
static constexpr size_t length = 6;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister base, VirtualRegister thisValue, VirtualRegister property)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, dst, base, thisValue, property);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister base, VirtualRegister thisValue, VirtualRegister property)
{
auto __metadataID = gen->addMetadataFor(opcodeID);
return emit<__size, BytecodeGenerator, shouldAssert>(gen, dst, base, thisValue, property, __metadataID);
}
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkWithoutMetadataID(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister base, VirtualRegister thisValue, VirtualRegister property)
{
decltype(gen->addMetadataFor(opcodeID)) __metadataID { };
return checkImpl<__size, BytecodeGenerator>(gen, dst, base, thisValue, property, __metadataID);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister base, VirtualRegister thisValue, VirtualRegister property, unsigned __metadataID)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, dst, base, thisValue, property, __metadataID);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister base, VirtualRegister thisValue, VirtualRegister property)
{
auto __metadataID = gen->addMetadataFor(opcodeID);
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, dst, base, thisValue, property, __metadataID))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, dst, base, thisValue, property, __metadataID))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, dst, base, thisValue, property, __metadataID);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& dst, VirtualRegister& base, VirtualRegister& thisValue, VirtualRegister& property, unsigned __metadataID)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<OpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(dst)
&& Fits<VirtualRegister, __size>::check(base)
&& Fits<VirtualRegister, __size>::check(thisValue)
&& Fits<VirtualRegister, __size>::check(property)
&& Fits<unsigned, __size>::check(__metadataID)
&& (__size == OpcodeSize::Wide16 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister base, VirtualRegister thisValue, VirtualRegister property, unsigned __metadataID)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, dst, base, thisValue, property, __metadataID)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide32));
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(dst));
gen->write(Fits<VirtualRegister, __size>::convert(base));
gen->write(Fits<VirtualRegister, __size>::convert(thisValue));
gen->write(Fits<VirtualRegister, __size>::convert(property));
gen->write(Fits<unsigned, __size>::convert(__metadataID));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**get_by_val_with_this"[2 - __sizeShiftAmount]);
dumper->dumpOperand("dst", m_dst, true);
dumper->dumpOperand("base", m_base, false);
dumper->dumpOperand("thisValue", m_thisValue, false);
dumper->dumpOperand("property", m_property, false);
}
OpGetByValWithThis(const uint8_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_base(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[1]))
, m_thisValue(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[2]))
, m_property(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[3]))
, m_metadataID(Fits<unsigned, OpcodeSize::Narrow>::convert(stream[4]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpGetByValWithThis(const uint16_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_base(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[1]))
, m_thisValue(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[2]))
, m_property(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[3]))
, m_metadataID(Fits<unsigned, OpcodeSize::Wide16>::convert(stream[4]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpGetByValWithThis(const uint32_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_base(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[1]))
, m_thisValue(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[2]))
, m_property(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[3]))
, m_metadataID(Fits<unsigned, OpcodeSize::Wide32>::convert(stream[4]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static OpGetByValWithThis decode(const uint8_t* stream)
{
// The stream points at the start of the encoded instruction: the wide prefix if present, otherwise the opcode byte.
// The constructors above receive a pointer to the first operand (opcode and prefix are not included).
if (*stream == op_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == op_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setDst<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setDst<OpcodeSize::Wide16>(value, func);
else
setDst<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setBase(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setBase<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setBase<OpcodeSize::Wide16>(value, func);
else
setBase<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setBase(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setThisValue(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setThisValue<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setThisValue<OpcodeSize::Wide16>(value, func);
else
setThisValue<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setThisValue(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 2 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setProperty(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setProperty<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setProperty<OpcodeSize::Wide16>(value, func);
else
setProperty<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setProperty(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 3 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
struct Metadata {
WTF_MAKE_NONCOPYABLE(Metadata);
public:
static constexpr OpcodeID opcodeID = op_get_by_val_with_this;
Metadata(const OpGetByValWithThis&) { }
ValueProfile m_profile;
};
Metadata& metadata(CodeBlock* codeBlock) const
{
return codeBlock->metadata<Metadata>(opcodeID, m_metadataID);
}
VirtualRegister m_dst;
VirtualRegister m_base;
VirtualRegister m_thisValue;
VirtualRegister m_property;
unsigned m_metadataID;
};
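// In OpResolveScope's metadata below, several WriteBarrierBase members share
// a union; which member is live presumably depends on m_resolveType (an
// assumption based on the union layout; the discriminant is not enforced in
// this header).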
struct OpResolveScope : public Instruction {
static constexpr OpcodeID opcodeID = op_resolve_scope;
static constexpr size_t length = 7;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister scope, unsigned var, ResolveType resolveType, unsigned localScopeDepth)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, dst, scope, var, resolveType, localScopeDepth);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister scope, unsigned var, ResolveType resolveType, unsigned localScopeDepth)
{
auto __metadataID = gen->addMetadataFor(opcodeID);
return emit<__size, BytecodeGenerator, shouldAssert>(gen, dst, scope, var, resolveType, localScopeDepth, __metadataID);
}
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkWithoutMetadataID(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister scope, unsigned var, ResolveType resolveType, unsigned localScopeDepth)
{
decltype(gen->addMetadataFor(opcodeID)) __metadataID { };
return checkImpl<__size, BytecodeGenerator>(gen, dst, scope, var, resolveType, localScopeDepth, __metadataID);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister scope, unsigned var, ResolveType resolveType, unsigned localScopeDepth, unsigned __metadataID)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, dst, scope, var, resolveType, localScopeDepth, __metadataID);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister scope, unsigned var, ResolveType resolveType, unsigned localScopeDepth)
{
auto __metadataID = gen->addMetadataFor(opcodeID);
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, dst, scope, var, resolveType, localScopeDepth, __metadataID))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, dst, scope, var, resolveType, localScopeDepth, __metadataID))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, dst, scope, var, resolveType, localScopeDepth, __metadataID);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& dst, VirtualRegister& scope, unsigned& var, ResolveType& resolveType, unsigned& localScopeDepth, unsigned __metadataID)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<OpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(dst)
&& Fits<VirtualRegister, __size>::check(scope)
&& Fits<unsigned, __size>::check(var)
&& Fits<ResolveType, __size>::check(resolveType)
&& Fits<unsigned, __size>::check(localScopeDepth)
&& Fits<unsigned, __size>::check(__metadataID)
&& (__size == OpcodeSize::Wide16 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister scope, unsigned var, ResolveType resolveType, unsigned localScopeDepth, unsigned __metadataID)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, dst, scope, var, resolveType, localScopeDepth, __metadataID)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide32));
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(dst));
gen->write(Fits<VirtualRegister, __size>::convert(scope));
gen->write(Fits<unsigned, __size>::convert(var));
gen->write(Fits<ResolveType, __size>::convert(resolveType));
gen->write(Fits<unsigned, __size>::convert(localScopeDepth));
gen->write(Fits<unsigned, __size>::convert(__metadataID));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**resolve_scope"[2 - __sizeShiftAmount]);
dumper->dumpOperand("dst", m_dst, true);
dumper->dumpOperand("scope", m_scope, false);
dumper->dumpOperand("var", m_var, false);
dumper->dumpOperand("resolveType", m_resolveType, false);
dumper->dumpOperand("localScopeDepth", m_localScopeDepth, false);
}
OpResolveScope(const uint8_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_scope(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[1]))
, m_var(Fits<unsigned, OpcodeSize::Narrow>::convert(stream[2]))
, m_resolveType(Fits<ResolveType, OpcodeSize::Narrow>::convert(stream[3]))
, m_localScopeDepth(Fits<unsigned, OpcodeSize::Narrow>::convert(stream[4]))
, m_metadataID(Fits<unsigned, OpcodeSize::Narrow>::convert(stream[5]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpResolveScope(const uint16_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_scope(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[1]))
, m_var(Fits<unsigned, OpcodeSize::Wide16>::convert(stream[2]))
, m_resolveType(Fits<ResolveType, OpcodeSize::Wide16>::convert(stream[3]))
, m_localScopeDepth(Fits<unsigned, OpcodeSize::Wide16>::convert(stream[4]))
, m_metadataID(Fits<unsigned, OpcodeSize::Wide16>::convert(stream[5]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpResolveScope(const uint32_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_scope(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[1]))
, m_var(Fits<unsigned, OpcodeSize::Wide32>::convert(stream[2]))
, m_resolveType(Fits<ResolveType, OpcodeSize::Wide32>::convert(stream[3]))
, m_localScopeDepth(Fits<unsigned, OpcodeSize::Wide32>::convert(stream[4]))
, m_metadataID(Fits<unsigned, OpcodeSize::Wide32>::convert(stream[5]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static OpResolveScope decode(const uint8_t* stream)
{
// The stream points at the start of the encoded instruction: the wide prefix if present, otherwise the opcode byte.
// The constructors above receive a pointer to the first operand (opcode and prefix are not included).
if (*stream == op_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == op_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setDst<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setDst<OpcodeSize::Wide16>(value, func);
else
setDst<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setScope(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setScope<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setScope<OpcodeSize::Wide16>(value, func);
else
setScope<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setScope(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setVar(unsigned value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setVar<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setVar<OpcodeSize::Wide16>(value, func);
else
setVar<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setVar(unsigned value, Functor func)
{
if (!Fits<unsigned, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 2 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<unsigned, size>::convert(value);
}
template<typename Functor>
void setResolveType(ResolveType value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setResolveType<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setResolveType<OpcodeSize::Wide16>(value, func);
else
setResolveType<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setResolveType(ResolveType value, Functor func)
{
if (!Fits<ResolveType, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 3 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<ResolveType, size>::convert(value);
}
template<typename Functor>
void setLocalScopeDepth(unsigned value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setLocalScopeDepth<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setLocalScopeDepth<OpcodeSize::Wide16>(value, func);
else
setLocalScopeDepth<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setLocalScopeDepth(unsigned value, Functor func)
{
if (!Fits<unsigned, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 4 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<unsigned, size>::convert(value);
}
struct Metadata {
WTF_MAKE_NONCOPYABLE(Metadata);
public:
static constexpr OpcodeID opcodeID = op_resolve_scope;
Metadata(const OpResolveScope&) { }
ResolveType m_resolveType;
union {
unsigned m_localScopeDepth;
unsigned m_globalLexicalBindingEpoch;
};
union {
WriteBarrierBase<JSCell> m_lexicalEnvironment;
WriteBarrierBase<SymbolTable> m_symbolTable;
WriteBarrierBase<JSScope> m_constantScope;
WriteBarrierBase<JSGlobalLexicalEnvironment> m_globalLexicalEnvironment;
WriteBarrierBase<JSGlobalObject> m_globalObject;
};
};
Metadata& metadata(CodeBlock* codeBlock) const
{
return codeBlock->metadata<Metadata>(opcodeID, m_metadataID);
}
VirtualRegister m_dst;
VirtualRegister m_scope;
unsigned m_var;
ResolveType m_resolveType;
unsigned m_localScopeDepth;
unsigned m_metadataID;
};
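// Unlike most Metadata constructors in this file, OpGetFromScope::Metadata
// below copies operands out of the decoded bytecode at construction time:
// m_getPutInfo comes straight from the instruction, and m_offset seeds
// m_operand.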
struct OpGetFromScope : public Instruction {
static constexpr OpcodeID opcodeID = op_get_from_scope;
static constexpr size_t length = 8;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister scope, unsigned var, GetPutInfo getPutInfo, unsigned localScopeDepth, unsigned offset)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, dst, scope, var, getPutInfo, localScopeDepth, offset);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister scope, unsigned var, GetPutInfo getPutInfo, unsigned localScopeDepth, unsigned offset)
{
auto __metadataID = gen->addMetadataFor(opcodeID);
return emit<__size, BytecodeGenerator, shouldAssert>(gen, dst, scope, var, getPutInfo, localScopeDepth, offset, __metadataID);
}
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkWithoutMetadataID(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister scope, unsigned var, GetPutInfo getPutInfo, unsigned localScopeDepth, unsigned offset)
{
decltype(gen->addMetadataFor(opcodeID)) __metadataID { };
return checkImpl<__size, BytecodeGenerator>(gen, dst, scope, var, getPutInfo, localScopeDepth, offset, __metadataID);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister scope, unsigned var, GetPutInfo getPutInfo, unsigned localScopeDepth, unsigned offset, unsigned __metadataID)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, dst, scope, var, getPutInfo, localScopeDepth, offset, __metadataID);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister scope, unsigned var, GetPutInfo getPutInfo, unsigned localScopeDepth, unsigned offset)
{
auto __metadataID = gen->addMetadataFor(opcodeID);
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, dst, scope, var, getPutInfo, localScopeDepth, offset, __metadataID))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, dst, scope, var, getPutInfo, localScopeDepth, offset, __metadataID))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, dst, scope, var, getPutInfo, localScopeDepth, offset, __metadataID);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& dst, VirtualRegister& scope, unsigned& var, GetPutInfo& getPutInfo, unsigned& localScopeDepth, unsigned& offset, unsigned __metadataID)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<OpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(dst)
&& Fits<VirtualRegister, __size>::check(scope)
&& Fits<unsigned, __size>::check(var)
&& Fits<GetPutInfo, __size>::check(getPutInfo)
&& Fits<unsigned, __size>::check(localScopeDepth)
&& Fits<unsigned, __size>::check(offset)
&& Fits<unsigned, __size>::check(__metadataID)
&& (__size == OpcodeSize::Wide16 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister scope, unsigned var, GetPutInfo getPutInfo, unsigned localScopeDepth, unsigned offset, unsigned __metadataID)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, dst, scope, var, getPutInfo, localScopeDepth, offset, __metadataID)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide32));
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(dst));
gen->write(Fits<VirtualRegister, __size>::convert(scope));
gen->write(Fits<unsigned, __size>::convert(var));
gen->write(Fits<GetPutInfo, __size>::convert(getPutInfo));
gen->write(Fits<unsigned, __size>::convert(localScopeDepth));
gen->write(Fits<unsigned, __size>::convert(offset));
gen->write(Fits<unsigned, __size>::convert(__metadataID));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**get_from_scope"[2 - __sizeShiftAmount]);
dumper->dumpOperand("dst", m_dst, true);
dumper->dumpOperand("scope", m_scope, false);
dumper->dumpOperand("var", m_var, false);
dumper->dumpOperand("getPutInfo", m_getPutInfo, false);
dumper->dumpOperand("localScopeDepth", m_localScopeDepth, false);
dumper->dumpOperand("offset", m_offset, false);
}
OpGetFromScope(const uint8_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_scope(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[1]))
, m_var(Fits<unsigned, OpcodeSize::Narrow>::convert(stream[2]))
, m_getPutInfo(Fits<GetPutInfo, OpcodeSize::Narrow>::convert(stream[3]))
, m_localScopeDepth(Fits<unsigned, OpcodeSize::Narrow>::convert(stream[4]))
, m_offset(Fits<unsigned, OpcodeSize::Narrow>::convert(stream[5]))
, m_metadataID(Fits<unsigned, OpcodeSize::Narrow>::convert(stream[6]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpGetFromScope(const uint16_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_scope(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[1]))
, m_var(Fits<unsigned, OpcodeSize::Wide16>::convert(stream[2]))
, m_getPutInfo(Fits<GetPutInfo, OpcodeSize::Wide16>::convert(stream[3]))
, m_localScopeDepth(Fits<unsigned, OpcodeSize::Wide16>::convert(stream[4]))
, m_offset(Fits<unsigned, OpcodeSize::Wide16>::convert(stream[5]))
, m_metadataID(Fits<unsigned, OpcodeSize::Wide16>::convert(stream[6]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpGetFromScope(const uint32_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_scope(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[1]))
, m_var(Fits<unsigned, OpcodeSize::Wide32>::convert(stream[2]))
, m_getPutInfo(Fits<GetPutInfo, OpcodeSize::Wide32>::convert(stream[3]))
, m_localScopeDepth(Fits<unsigned, OpcodeSize::Wide32>::convert(stream[4]))
, m_offset(Fits<unsigned, OpcodeSize::Wide32>::convert(stream[5]))
, m_metadataID(Fits<unsigned, OpcodeSize::Wide32>::convert(stream[6]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static OpGetFromScope decode(const uint8_t* stream)
{
// The stream points at the start of the encoded instruction: the wide prefix if present, otherwise the opcode byte.
// The constructors above receive a pointer to the first operand (opcode and prefix are not included).
if (*stream == op_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == op_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setDst<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setDst<OpcodeSize::Wide16>(value, func);
else
setDst<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setScope(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setScope<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setScope<OpcodeSize::Wide16>(value, func);
else
setScope<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setScope(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setVar(unsigned value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setVar<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setVar<OpcodeSize::Wide16>(value, func);
else
setVar<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setVar(unsigned value, Functor func)
{
if (!Fits<unsigned, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 2 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<unsigned, size>::convert(value);
}
template<typename Functor>
void setGetPutInfo(GetPutInfo value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setGetPutInfo<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setGetPutInfo<OpcodeSize::Wide16>(value, func);
else
setGetPutInfo<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setGetPutInfo(GetPutInfo value, Functor func)
{
if (!Fits<GetPutInfo, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 3 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<GetPutInfo, size>::convert(value);
}
template<typename Functor>
void setLocalScopeDepth(unsigned value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setLocalScopeDepth<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setLocalScopeDepth<OpcodeSize::Wide16>(value, func);
else
setLocalScopeDepth<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setLocalScopeDepth(unsigned value, Functor func)
{
if (!Fits<unsigned, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 4 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<unsigned, size>::convert(value);
}
template<typename Functor>
void setOffset(unsigned value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setOffset<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setOffset<OpcodeSize::Wide16>(value, func);
else
setOffset<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setOffset(unsigned value, Functor func)
{
if (!Fits<unsigned, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 5 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<unsigned, size>::convert(value);
}
struct Metadata {
WTF_MAKE_NONCOPYABLE(Metadata);
public:
static constexpr OpcodeID opcodeID = op_get_from_scope;
Metadata(const OpGetFromScope& __op)
: m_getPutInfo(__op.m_getPutInfo)
, m_operand(__op.m_offset)
{ }
GetPutInfo m_getPutInfo;
union {
WatchpointSet* m_watchpointSet;
WriteBarrierBase<Structure> m_structure;
};
uintptr_t m_operand;
ValueProfile m_profile;
};
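// Metadata note (editorial, hedged): which union member is live follows the
// resolve mode packed into m_getPutInfo -- in JSC, watchpoint-based variable
// resolutions use m_watchpointSet, while structure-checked global-property
// resolutions use m_structure; this header only declares the storage. Access
// is per instruction, keyed by m_metadataID (illustrative):
//
//     auto& md = op.metadata(codeBlock);  // OpGetFromScope::Metadata&
//     ValueProfile& p = md.m_profile;     // result profile sampled for the JIT tiers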
Metadata& metadata(CodeBlock* codeBlock) const
{
return codeBlock->metadata<Metadata>(opcodeID, m_metadataID);
}
VirtualRegister m_dst;
VirtualRegister m_scope;
unsigned m_var;
GetPutInfo m_getPutInfo;
unsigned m_localScopeDepth;
unsigned m_offset;
unsigned m_metadataID;
};
struct OpPutToScope : public Instruction {
static constexpr OpcodeID opcodeID = op_put_to_scope;
static constexpr size_t length = 8;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister scope, unsigned var, VirtualRegister value, GetPutInfo getPutInfo, SymbolTableOrScopeDepth symbolTableOrScopeDepth, unsigned offset)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, scope, var, value, getPutInfo, symbolTableOrScopeDepth, offset);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert>
static bool emit(BytecodeGenerator* gen, VirtualRegister scope, unsigned var, VirtualRegister value, GetPutInfo getPutInfo, SymbolTableOrScopeDepth symbolTableOrScopeDepth, unsigned offset)
{
auto __metadataID = gen->addMetadataFor(opcodeID);
return emit<__size, BytecodeGenerator, shouldAssert>(gen, scope, var, value, getPutInfo, symbolTableOrScopeDepth, offset, __metadataID);
}
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkWithoutMetadataID(BytecodeGenerator* gen, VirtualRegister scope, unsigned var, VirtualRegister value, GetPutInfo getPutInfo, SymbolTableOrScopeDepth symbolTableOrScopeDepth, unsigned offset)
{
decltype(gen->addMetadataFor(opcodeID)) __metadataID { };
return checkImpl<__size, BytecodeGenerator>(gen, scope, var, value, getPutInfo, symbolTableOrScopeDepth, offset, __metadataID);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister scope, unsigned var, VirtualRegister value, GetPutInfo getPutInfo, SymbolTableOrScopeDepth symbolTableOrScopeDepth, unsigned offset, unsigned __metadataID)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, scope, var, value, getPutInfo, symbolTableOrScopeDepth, offset, __metadataID);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister scope, unsigned var, VirtualRegister value, GetPutInfo getPutInfo, SymbolTableOrScopeDepth symbolTableOrScopeDepth, unsigned offset)
{
auto __metadataID = gen->addMetadataFor(opcodeID);
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, scope, var, value, getPutInfo, symbolTableOrScopeDepth, offset, __metadataID))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, scope, var, value, getPutInfo, symbolTableOrScopeDepth, offset, __metadataID))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, scope, var, value, getPutInfo, symbolTableOrScopeDepth, offset, __metadataID);
}
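// Emission note (editorial sketch): emitWithSmallestSizeRequirement tries the
// encodings in order -- narrow, then wide16, then wide32 -- and the wide32
// attempt is asserted to succeed. A caller may start the search wider via the
// template argument, e.g. (hypothetical generator and operand values):
//
//     OpPutToScope::emitWithSmallestSizeRequirement<OpcodeSize::Wide32>(
//         &generator, scopeReg, varIndex, valueReg, getPutInfo, depth, offset);
//
// On Windows CLoop builds the wide16 attempt is rejected wholesale in
// checkImpl (see the FIXME below), so such emissions fall through to wide32.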
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& scope, unsigned& var, VirtualRegister& value, GetPutInfo& getPutInfo, SymbolTableOrScopeDepth& symbolTableOrScopeDepth, unsigned& offset, unsigned __metadataID)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<OpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(scope)
&& Fits<unsigned, __size>::check(var)
&& Fits<VirtualRegister, __size>::check(value)
&& Fits<GetPutInfo, __size>::check(getPutInfo)
&& Fits<SymbolTableOrScopeDepth, __size>::check(symbolTableOrScopeDepth)
&& Fits<unsigned, __size>::check(offset)
&& Fits<unsigned, __size>::check(__metadataID)
&& (__size == OpcodeSize::Wide16 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister scope, unsigned var, VirtualRegister value, GetPutInfo getPutInfo, SymbolTableOrScopeDepth symbolTableOrScopeDepth, unsigned offset, unsigned __metadataID)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, scope, var, value, getPutInfo, symbolTableOrScopeDepth, offset, __metadataID)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide32));
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(scope));
gen->write(Fits<unsigned, __size>::convert(var));
gen->write(Fits<VirtualRegister, __size>::convert(value));
gen->write(Fits<GetPutInfo, __size>::convert(getPutInfo));
gen->write(Fits<SymbolTableOrScopeDepth, __size>::convert(symbolTableOrScopeDepth));
gen->write(Fits<unsigned, __size>::convert(offset));
gen->write(Fits<unsigned, __size>::convert(__metadataID));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
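// Width marker: __sizeShiftAmount is 0 / 1 / 2 for narrow / wide16 / wide32,
// so indexing "**put_to_scope" at 2 - __sizeShiftAmount prints the name bare,
// with one star, or with two stars respectively.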
dumper->printLocationAndOp(__location, &"**put_to_scope"[2 - __sizeShiftAmount]);
dumper->dumpOperand("scope", m_scope, true);
dumper->dumpOperand("var", m_var, false);
dumper->dumpOperand("value", m_value, false);
dumper->dumpOperand("getPutInfo", m_getPutInfo, false);
dumper->dumpOperand("symbolTableOrScopeDepth", m_symbolTableOrScopeDepth, false);
dumper->dumpOperand("offset", m_offset, false);
}
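// Decoding constructors (editorial note): one per encoding width; each reads
// the seven operands at that width and asserts that the byte immediately
// preceding the operands is op_put_to_scope (stream[-1] == opcodeID), i.e.
// that the caller really did skip past any prefix and the opcode byte.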
OpPutToScope(const uint8_t* stream)
: m_scope(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_var(Fits<unsigned, OpcodeSize::Narrow>::convert(stream[1]))
, m_value(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[2]))
, m_getPutInfo(Fits<GetPutInfo, OpcodeSize::Narrow>::convert(stream[3]))
, m_symbolTableOrScopeDepth(Fits<SymbolTableOrScopeDepth, OpcodeSize::Narrow>::convert(stream[4]))
, m_offset(Fits<unsigned, OpcodeSize::Narrow>::convert(stream[5]))
, m_metadataID(Fits<unsigned, OpcodeSize::Narrow>::convert(stream[6]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpPutToScope(const uint16_t* stream)
: m_scope(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_var(Fits<unsigned, OpcodeSize::Wide16>::convert(stream[1]))
, m_value(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[2]))
, m_getPutInfo(Fits<GetPutInfo, OpcodeSize::Wide16>::convert(stream[3]))
, m_symbolTableOrScopeDepth(Fits<SymbolTableOrScopeDepth, OpcodeSize::Wide16>::convert(stream[4]))
, m_offset(Fits<unsigned, OpcodeSize::Wide16>::convert(stream[5]))
, m_metadataID(Fits<unsigned, OpcodeSize::Wide16>::convert(stream[6]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpPutToScope(const uint32_t* stream)
: m_scope(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_var(Fits<unsigned, OpcodeSize::Wide32>::convert(stream[1]))
, m_value(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[2]))
, m_getPutInfo(Fits<GetPutInfo, OpcodeSize::Wide32>::convert(stream[3]))
, m_symbolTableOrScopeDepth(Fits<SymbolTableOrScopeDepth, OpcodeSize::Wide32>::convert(stream[4]))
, m_offset(Fits<unsigned, OpcodeSize::Wide32>::convert(stream[5]))
, m_metadataID(Fits<unsigned, OpcodeSize::Wide32>::convert(stream[6]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static OpPutToScope decode(const uint8_t* stream)
{
// The constructor is handed a pointer to the first operand; the prefix (if any) and the opcode byte are skipped here.
if (*stream == op_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == op_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setScope(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setScope<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setScope<OpcodeSize::Wide16>(value, func);
else
setScope<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setScope(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setVar(unsigned value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setVar<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setVar<OpcodeSize::Wide16>(value, func);
else
setVar<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setVar(unsigned value, Functor func)
{
if (!Fits<unsigned, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<unsigned, size>::convert(value);
}
template<typename Functor>
void setValue(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setValue<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setValue<OpcodeSize::Wide16>(value, func);
else
setValue<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setValue(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 2 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setGetPutInfo(GetPutInfo value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setGetPutInfo<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setGetPutInfo<OpcodeSize::Wide16>(value, func);
else
setGetPutInfo<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setGetPutInfo(GetPutInfo value, Functor func)
{
if (!Fits<GetPutInfo, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 3 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<GetPutInfo, size>::convert(value);
}
template<typename Functor>
void setSymbolTableOrScopeDepth(SymbolTableOrScopeDepth value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setSymbolTableOrScopeDepth<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setSymbolTableOrScopeDepth<OpcodeSize::Wide16>(value, func);
else
setSymbolTableOrScopeDepth<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setSymbolTableOrScopeDepth(SymbolTableOrScopeDepth value, Functor func)
{
if (!Fits<SymbolTableOrScopeDepth, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 4 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<SymbolTableOrScopeDepth, size>::convert(value);
}
template<typename Functor>
void setOffset(unsigned value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setOffset<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setOffset<OpcodeSize::Wide16>(value, func);
else
setOffset<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setOffset(unsigned value, Functor func)
{
if (!Fits<unsigned, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 5 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<unsigned, size>::convert(value);
}
struct Metadata {
WTF_MAKE_NONCOPYABLE(Metadata);
public:
static constexpr OpcodeID opcodeID = op_put_to_scope;
Metadata(const OpPutToScope& __op)
: m_getPutInfo(__op.m_getPutInfo)
, m_operand(__op.m_offset)
{ }
GetPutInfo m_getPutInfo;
union {
WriteBarrierBase<Structure> m_structure;
WatchpointSet* m_watchpointSet;
};
uintptr_t m_operand;
};
Metadata& metadata(CodeBlock* codeBlock) const
{
return codeBlock->metadata<Metadata>(opcodeID, m_metadataID);
}
VirtualRegister m_scope;
unsigned m_var;
VirtualRegister m_value;
GetPutInfo m_getPutInfo;
SymbolTableOrScopeDepth m_symbolTableOrScopeDepth;
unsigned m_offset;
unsigned m_metadataID;
};
struct OpGetFromArguments : public Instruction {
static constexpr OpcodeID opcodeID = op_get_from_arguments;
static constexpr size_t length = 5;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister arguments, unsigned index)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, dst, arguments, index);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister arguments, unsigned index)
{
auto __metadataID = gen->addMetadataFor(opcodeID);
return emit<__size, BytecodeGenerator, shouldAssert>(gen, dst, arguments, index, __metadataID);
}
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkWithoutMetadataID(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister arguments, unsigned index)
{
decltype(gen->addMetadataFor(opcodeID)) __metadataID { };
return checkImpl<__size, BytecodeGenerator>(gen, dst, arguments, index, __metadataID);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister arguments, unsigned index, unsigned __metadataID)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, dst, arguments, index, __metadataID);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister arguments, unsigned index)
{
auto __metadataID = gen->addMetadataFor(opcodeID);
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, dst, arguments, index, __metadataID))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, dst, arguments, index, __metadataID))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, dst, arguments, index, __metadataID);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& dst, VirtualRegister& arguments, unsigned& index, unsigned __metadataID)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<OpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(dst)
&& Fits<VirtualRegister, __size>::check(arguments)
&& Fits<unsigned, __size>::check(index)
&& Fits<unsigned, __size>::check(__metadataID)
&& (__size == OpcodeSize::Wide16 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister arguments, unsigned index, unsigned __metadataID)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, dst, arguments, index, __metadataID)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide32));
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(dst));
gen->write(Fits<VirtualRegister, __size>::convert(arguments));
gen->write(Fits<unsigned, __size>::convert(index));
gen->write(Fits<unsigned, __size>::convert(__metadataID));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**get_from_arguments"[2 - __sizeShiftAmount]);
dumper->dumpOperand("dst", m_dst, true);
dumper->dumpOperand("arguments", m_arguments, false);
dumper->dumpOperand("index", m_index, false);
}
OpGetFromArguments(const uint8_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_arguments(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[1]))
, m_index(Fits<unsigned, OpcodeSize::Narrow>::convert(stream[2]))
, m_metadataID(Fits<unsigned, OpcodeSize::Narrow>::convert(stream[3]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpGetFromArguments(const uint16_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_arguments(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[1]))
, m_index(Fits<unsigned, OpcodeSize::Wide16>::convert(stream[2]))
, m_metadataID(Fits<unsigned, OpcodeSize::Wide16>::convert(stream[3]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpGetFromArguments(const uint32_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_arguments(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[1]))
, m_index(Fits<unsigned, OpcodeSize::Wide32>::convert(stream[2]))
, m_metadataID(Fits<unsigned, OpcodeSize::Wide32>::convert(stream[3]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static OpGetFromArguments decode(const uint8_t* stream)
{
// The constructor is handed a pointer to the first operand; the prefix (if any) and the opcode byte are skipped here.
if (*stream == op_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == op_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setDst<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setDst<OpcodeSize::Wide16>(value, func);
else
setDst<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setArguments(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setArguments<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setArguments<OpcodeSize::Wide16>(value, func);
else
setArguments<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setArguments(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setIndex(unsigned value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setIndex<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setIndex<OpcodeSize::Wide16>(value, func);
else
setIndex<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setIndex(unsigned value, Functor func)
{
if (!Fits<unsigned, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 2 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<unsigned, size>::convert(value);
}
struct Metadata {
WTF_MAKE_NONCOPYABLE(Metadata);
public:
static constexpr OpcodeID opcodeID = op_get_from_arguments;
Metadata(const OpGetFromArguments&) { }
ValueProfile m_profile;
};
Metadata& metadata(CodeBlock* codeBlock) const
{
return codeBlock->metadata<Metadata>(opcodeID, m_metadataID);
}
VirtualRegister m_dst;
VirtualRegister m_arguments;
unsigned m_index;
unsigned m_metadataID;
};
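// Profiling note (editorial, hedged): several ops in this stretch --
// OpGetFromArguments above, OpBitand and OpToObject below -- carry only a
// ValueProfile in their Metadata. The profile records the values the
// instruction produces so the optimizing tiers can speculate on result types;
// the bytecode itself remains a plain register-to-register operation.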
struct OpSub : public Instruction {
static constexpr OpcodeID opcodeID = op_sub;
static constexpr size_t length = 6;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs, OperandTypes operandTypes)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, dst, lhs, rhs, operandTypes);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs, OperandTypes operandTypes)
{
auto __metadataID = gen->addMetadataFor(opcodeID);
return emit<__size, BytecodeGenerator, shouldAssert>(gen, dst, lhs, rhs, operandTypes, __metadataID);
}
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkWithoutMetadataID(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs, OperandTypes operandTypes)
{
decltype(gen->addMetadataFor(opcodeID)) __metadataID { };
return checkImpl<__size, BytecodeGenerator>(gen, dst, lhs, rhs, operandTypes, __metadataID);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs, OperandTypes operandTypes, unsigned __metadataID)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, dst, lhs, rhs, operandTypes, __metadataID);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs, OperandTypes operandTypes)
{
auto __metadataID = gen->addMetadataFor(opcodeID);
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, dst, lhs, rhs, operandTypes, __metadataID))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, dst, lhs, rhs, operandTypes, __metadataID))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, dst, lhs, rhs, operandTypes, __metadataID);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& dst, VirtualRegister& lhs, VirtualRegister& rhs, OperandTypes& operandTypes, unsigned __metadataID)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<OpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(dst)
&& Fits<VirtualRegister, __size>::check(lhs)
&& Fits<VirtualRegister, __size>::check(rhs)
&& Fits<OperandTypes, __size>::check(operandTypes)
&& Fits<unsigned, __size>::check(__metadataID)
&& (__size == OpcodeSize::Wide16 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs, OperandTypes operandTypes, unsigned __metadataID)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, dst, lhs, rhs, operandTypes, __metadataID)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide32));
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(dst));
gen->write(Fits<VirtualRegister, __size>::convert(lhs));
gen->write(Fits<VirtualRegister, __size>::convert(rhs));
gen->write(Fits<OperandTypes, __size>::convert(operandTypes));
gen->write(Fits<unsigned, __size>::convert(__metadataID));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**sub"[2 - __sizeShiftAmount]);
dumper->dumpOperand("dst", m_dst, true);
dumper->dumpOperand("lhs", m_lhs, false);
dumper->dumpOperand("rhs", m_rhs, false);
dumper->dumpOperand("operandTypes", m_operandTypes, false);
}
OpSub(const uint8_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_lhs(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[1]))
, m_rhs(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[2]))
, m_operandTypes(Fits<OperandTypes, OpcodeSize::Narrow>::convert(stream[3]))
, m_metadataID(Fits<unsigned, OpcodeSize::Narrow>::convert(stream[4]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpSub(const uint16_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_lhs(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[1]))
, m_rhs(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[2]))
, m_operandTypes(Fits<OperandTypes, OpcodeSize::Wide16>::convert(stream[3]))
, m_metadataID(Fits<unsigned, OpcodeSize::Wide16>::convert(stream[4]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpSub(const uint32_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_lhs(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[1]))
, m_rhs(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[2]))
, m_operandTypes(Fits<OperandTypes, OpcodeSize::Wide32>::convert(stream[3]))
, m_metadataID(Fits<unsigned, OpcodeSize::Wide32>::convert(stream[4]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static OpSub decode(const uint8_t* stream)
{
// The constructor is handed a pointer to the first operand; the prefix (if any) and the opcode byte are skipped here.
if (*stream == op_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == op_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setDst<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setDst<OpcodeSize::Wide16>(value, func);
else
setDst<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setLhs(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setLhs<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setLhs<OpcodeSize::Wide16>(value, func);
else
setLhs<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setLhs(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setRhs(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setRhs<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setRhs<OpcodeSize::Wide16>(value, func);
else
setRhs<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setRhs(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 2 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setOperandTypes(OperandTypes value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setOperandTypes<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setOperandTypes<OpcodeSize::Wide16>(value, func);
else
setOperandTypes<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setOperandTypes(OperandTypes value, Functor func)
{
if (!Fits<OperandTypes, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 3 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<OperandTypes, size>::convert(value);
}
struct Metadata {
WTF_MAKE_NONCOPYABLE(Metadata);
public:
static constexpr OpcodeID opcodeID = op_sub;
Metadata(const OpSub&) { }
BinaryArithProfile m_arithProfile;
};
Metadata& metadata(CodeBlock* codeBlock) const
{
return codeBlock->metadata<Metadata>(opcodeID, m_metadataID);
}
VirtualRegister m_dst;
VirtualRegister m_lhs;
VirtualRegister m_rhs;
OperandTypes m_operandTypes;
unsigned m_metadataID;
};
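// Arithmetic profiling note (editorial, hedged): OpSub's Metadata holds a
// BinaryArithProfile, and OpInc / OpDec below hold a UnaryArithProfile; these
// observe runtime behavior (e.g. int32 overflow, double results), while the
// OperandTypes operand is the static type prediction fixed at emit time. An
// emission sketch (hypothetical generator and registers):
//
//     OpSub::emit(&generator, dstReg, lhsReg, rhsReg,
//         OperandTypes(ResultType::numberType(), ResultType::numberType()));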
struct OpBitand : public Instruction {
static constexpr OpcodeID opcodeID = op_bitand;
static constexpr size_t length = 5;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, dst, lhs, rhs);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
auto __metadataID = gen->addMetadataFor(opcodeID);
return emit<__size, BytecodeGenerator, shouldAssert>(gen, dst, lhs, rhs, __metadataID);
}
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkWithoutMetadataID(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
decltype(gen->addMetadataFor(opcodeID)) __metadataID { };
return checkImpl<__size, BytecodeGenerator>(gen, dst, lhs, rhs, __metadataID);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs, unsigned __metadataID)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, dst, lhs, rhs, __metadataID);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
auto __metadataID = gen->addMetadataFor(opcodeID);
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, dst, lhs, rhs, __metadataID))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, dst, lhs, rhs, __metadataID))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, dst, lhs, rhs, __metadataID);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& dst, VirtualRegister& lhs, VirtualRegister& rhs, unsigned __metadataID)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<OpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(dst)
&& Fits<VirtualRegister, __size>::check(lhs)
&& Fits<VirtualRegister, __size>::check(rhs)
&& Fits<unsigned, __size>::check(__metadataID)
&& (__size == OpcodeSize::Wide16 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs, unsigned __metadataID)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, dst, lhs, rhs, __metadataID)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide32));
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(dst));
gen->write(Fits<VirtualRegister, __size>::convert(lhs));
gen->write(Fits<VirtualRegister, __size>::convert(rhs));
gen->write(Fits<unsigned, __size>::convert(__metadataID));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**bitand"[2 - __sizeShiftAmount]);
dumper->dumpOperand("dst", m_dst, true);
dumper->dumpOperand("lhs", m_lhs, false);
dumper->dumpOperand("rhs", m_rhs, false);
}
OpBitand(const uint8_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_lhs(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[1]))
, m_rhs(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[2]))
, m_metadataID(Fits<unsigned, OpcodeSize::Narrow>::convert(stream[3]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpBitand(const uint16_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_lhs(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[1]))
, m_rhs(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[2]))
, m_metadataID(Fits<unsigned, OpcodeSize::Wide16>::convert(stream[3]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpBitand(const uint32_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_lhs(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[1]))
, m_rhs(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[2]))
, m_metadataID(Fits<unsigned, OpcodeSize::Wide32>::convert(stream[3]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static OpBitand decode(const uint8_t* stream)
{
// The constructor is handed a pointer to the first operand; the prefix (if any) and the opcode byte are skipped here.
if (*stream == op_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == op_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setDst<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setDst<OpcodeSize::Wide16>(value, func);
else
setDst<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setLhs(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setLhs<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setLhs<OpcodeSize::Wide16>(value, func);
else
setLhs<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setLhs(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setRhs(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setRhs<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setRhs<OpcodeSize::Wide16>(value, func);
else
setRhs<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setRhs(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 2 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
struct Metadata {
WTF_MAKE_NONCOPYABLE(Metadata);
public:
static constexpr OpcodeID opcodeID = op_bitand;
Metadata(const OpBitand&) { }
ValueProfile m_profile;
};
Metadata& metadata(CodeBlock* codeBlock) const
{
return codeBlock->metadata<Metadata>(opcodeID, m_metadataID);
}
VirtualRegister m_dst;
VirtualRegister m_lhs;
VirtualRegister m_rhs;
unsigned m_metadataID;
};
struct OpInc : public Instruction {
static constexpr OpcodeID opcodeID = op_inc;
static constexpr size_t length = 3;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister srcDst)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, srcDst);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert>
static bool emit(BytecodeGenerator* gen, VirtualRegister srcDst)
{
auto __metadataID = gen->addMetadataFor(opcodeID);
return emit<__size, BytecodeGenerator, shouldAssert>(gen, srcDst, __metadataID);
}
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkWithoutMetadataID(BytecodeGenerator* gen, VirtualRegister srcDst)
{
decltype(gen->addMetadataFor(opcodeID)) __metadataID { };
return checkImpl<__size, BytecodeGenerator>(gen, srcDst, __metadataID);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister srcDst, unsigned __metadataID)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, srcDst, __metadataID);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister srcDst)
{
auto __metadataID = gen->addMetadataFor(opcodeID);
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, srcDst, __metadataID))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, srcDst, __metadataID))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, srcDst, __metadataID);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& srcDst, unsigned __metadataID)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<OpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(srcDst)
&& Fits<unsigned, __size>::check(__metadataID)
&& (__size == OpcodeSize::Wide16 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister srcDst, unsigned __metadataID)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, srcDst, __metadataID)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide32));
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(srcDst));
gen->write(Fits<unsigned, __size>::convert(__metadataID));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**inc"[2 - __sizeShiftAmount]);
dumper->dumpOperand("srcDst", m_srcDst, true);
}
OpInc(const uint8_t* stream)
: m_srcDst(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_metadataID(Fits<unsigned, OpcodeSize::Narrow>::convert(stream[1]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpInc(const uint16_t* stream)
: m_srcDst(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_metadataID(Fits<unsigned, OpcodeSize::Wide16>::convert(stream[1]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpInc(const uint32_t* stream)
: m_srcDst(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_metadataID(Fits<unsigned, OpcodeSize::Wide32>::convert(stream[1]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static OpInc decode(const uint8_t* stream)
{
// The constructor is handed a pointer to the first operand; the prefix (if any) and the opcode byte are skipped here.
if (*stream == op_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == op_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setSrcDst(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setSrcDst<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setSrcDst<OpcodeSize::Wide16>(value, func);
else
setSrcDst<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setSrcDst(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
struct Metadata {
WTF_MAKE_NONCOPYABLE(Metadata);
public:
static constexpr OpcodeID opcodeID = op_inc;
Metadata(const OpInc&) { }
UnaryArithProfile m_arithProfile;
};
Metadata& metadata(CodeBlock* codeBlock) const
{
return codeBlock->metadata<Metadata>(opcodeID, m_metadataID);
}
VirtualRegister m_srcDst;
unsigned m_metadataID;
};
struct OpDec : public Instruction {
static constexpr OpcodeID opcodeID = op_dec;
static constexpr size_t length = 3;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister srcDst)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, srcDst);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert>
static bool emit(BytecodeGenerator* gen, VirtualRegister srcDst)
{
auto __metadataID = gen->addMetadataFor(opcodeID);
return emit<__size, BytecodeGenerator, shouldAssert>(gen, srcDst, __metadataID);
}
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkWithoutMetadataID(BytecodeGenerator* gen, VirtualRegister srcDst)
{
decltype(gen->addMetadataFor(opcodeID)) __metadataID { };
return checkImpl<__size, BytecodeGenerator>(gen, srcDst, __metadataID);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister srcDst, unsigned __metadataID)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, srcDst, __metadataID);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister srcDst)
{
auto __metadataID = gen->addMetadataFor(opcodeID);
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, srcDst, __metadataID))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, srcDst, __metadataID))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, srcDst, __metadataID);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& srcDst, unsigned __metadataID)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<OpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(srcDst)
&& Fits<unsigned, __size>::check(__metadataID)
&& (__size == OpcodeSize::Wide16 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister srcDst, unsigned __metadataID)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, srcDst, __metadataID)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide32));
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(srcDst));
gen->write(Fits<unsigned, __size>::convert(__metadataID));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**dec"[2 - __sizeShiftAmount]);
dumper->dumpOperand("srcDst", m_srcDst, true);
}
OpDec(const uint8_t* stream)
: m_srcDst(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_metadataID(Fits<unsigned, OpcodeSize::Narrow>::convert(stream[1]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpDec(const uint16_t* stream)
: m_srcDst(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_metadataID(Fits<unsigned, OpcodeSize::Wide16>::convert(stream[1]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpDec(const uint32_t* stream)
: m_srcDst(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_metadataID(Fits<unsigned, OpcodeSize::Wide32>::convert(stream[1]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static OpDec decode(const uint8_t* stream)
{
// The constructor is handed a pointer to the first operand; the prefix (if any) and the opcode byte are skipped here.
if (*stream == op_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == op_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setSrcDst(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setSrcDst<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setSrcDst<OpcodeSize::Wide16>(value, func);
else
setSrcDst<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setSrcDst(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
struct Metadata {
WTF_MAKE_NONCOPYABLE(Metadata);
public:
static constexpr OpcodeID opcodeID = op_dec;
Metadata(const OpDec&) { }
UnaryArithProfile m_arithProfile;
};
Metadata& metadata(CodeBlock* codeBlock) const
{
return codeBlock->metadata<Metadata>(opcodeID, m_metadataID);
}
VirtualRegister m_srcDst;
unsigned m_metadataID;
};
struct OpToObject : public Instruction {
static constexpr OpcodeID opcodeID = op_to_object;
static constexpr size_t length = 5;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister operand, unsigned message)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, dst, operand, message);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister operand, unsigned message)
{
auto __metadataID = gen->addMetadataFor(opcodeID);
return emit<__size, BytecodeGenerator, shouldAssert>(gen, dst, operand, message, __metadataID);
}
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkWithoutMetadataID(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister operand, unsigned message)
{
decltype(gen->addMetadataFor(opcodeID)) __metadataID { };
return checkImpl<__size, BytecodeGenerator>(gen, dst, operand, message, __metadataID);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister operand, unsigned message, unsigned __metadataID)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, dst, operand, message, __metadataID);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister operand, unsigned message)
{
auto __metadataID = gen->addMetadataFor(opcodeID);
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, dst, operand, message, __metadataID))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, dst, operand, message, __metadataID))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, dst, operand, message, __metadataID);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& dst, VirtualRegister& operand, unsigned& message, unsigned __metadataID)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<OpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(dst)
&& Fits<VirtualRegister, __size>::check(operand)
&& Fits<unsigned, __size>::check(message)
&& Fits<unsigned, __size>::check(__metadataID)
&& (__size == OpcodeSize::Wide16 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister operand, unsigned message, unsigned __metadataID)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, dst, operand, message, __metadataID)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide32));
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(dst));
gen->write(Fits<VirtualRegister, __size>::convert(operand));
gen->write(Fits<unsigned, __size>::convert(message));
gen->write(Fits<unsigned, __size>::convert(__metadataID));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**to_object"[2 - __sizeShiftAmount]);
dumper->dumpOperand("dst", m_dst, true);
dumper->dumpOperand("operand", m_operand, false);
dumper->dumpOperand("message", m_message, false);
}
OpToObject(const uint8_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_operand(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[1]))
, m_message(Fits<unsigned, OpcodeSize::Narrow>::convert(stream[2]))
, m_metadataID(Fits<unsigned, OpcodeSize::Narrow>::convert(stream[3]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpToObject(const uint16_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_operand(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[1]))
, m_message(Fits<unsigned, OpcodeSize::Wide16>::convert(stream[2]))
, m_metadataID(Fits<unsigned, OpcodeSize::Wide16>::convert(stream[3]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpToObject(const uint32_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_operand(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[1]))
, m_message(Fits<unsigned, OpcodeSize::Wide32>::convert(stream[2]))
, m_metadataID(Fits<unsigned, OpcodeSize::Wide32>::convert(stream[3]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static OpToObject decode(const uint8_t* stream)
{
        // The stream pointer points at the first operand; the opcode byte (and any wide prefix) has already been consumed.
if (*stream == op_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == op_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
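    // Operand setters: the untemplated overloads dispatch on this instruction's
    // current encoding width; the sized overloads patch the operand slot in place
    // at this + (operand index * width) + PaddingBySize (presumably the one-byte
    // wide prefix, if any) + one byte for the opcode. If the new value does not
    // fit the current width, func() supplies a replacement value, which is
    // assumed to fit: it is converted without a re-check.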
template<typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setDst<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setDst<OpcodeSize::Wide16>(value, func);
else
setDst<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setOperand(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setOperand<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setOperand<OpcodeSize::Wide16>(value, func);
else
setOperand<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setOperand(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setMessage(unsigned value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setMessage<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setMessage<OpcodeSize::Wide16>(value, func);
else
setMessage<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setMessage(unsigned value, Functor func)
{
if (!Fits<unsigned, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 2 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<unsigned, size>::convert(value);
}
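    // Out-of-line metadata, stored in the owning CodeBlock's metadata table and
    // looked up by m_metadataID. The ValueProfile presumably records the values
    // this op produces, for use by the profiling/optimizing tiers.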
struct Metadata {
WTF_MAKE_NONCOPYABLE(Metadata);
public:
static constexpr OpcodeID opcodeID = op_to_object;
Metadata(const OpToObject&) { }
ValueProfile m_profile;
};
Metadata& metadata(CodeBlock* codeBlock) const
{
return codeBlock->metadata<Metadata>(opcodeID, m_metadataID);
}
VirtualRegister m_dst;
VirtualRegister m_operand;
unsigned m_message;
unsigned m_metadataID;
};
struct OpCatch : public Instruction {
static constexpr OpcodeID opcodeID = op_catch;
static constexpr size_t length = 4;
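    // length counts encoding slots in the narrow form: the opcode plus three
    // operands (exception, thrownValue, and the implicit metadata ID).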
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister exception, VirtualRegister thrownValue)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, exception, thrownValue);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert>
static bool emit(BytecodeGenerator* gen, VirtualRegister exception, VirtualRegister thrownValue)
{
auto __metadataID = gen->addMetadataFor(opcodeID);
return emit<__size, BytecodeGenerator, shouldAssert>(gen, exception, thrownValue, __metadataID);
}
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkWithoutMetadataID(BytecodeGenerator* gen, VirtualRegister exception, VirtualRegister thrownValue)
{
decltype(gen->addMetadataFor(opcodeID)) __metadataID { };
return checkImpl<__size, BytecodeGenerator>(gen, exception, thrownValue, __metadataID);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister exception, VirtualRegister thrownValue, unsigned __metadataID)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, exception, thrownValue, __metadataID);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
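    // Tries Narrow first, then Wide16, and finally Wide32 (which must succeed),
    // so every instruction lands in the smallest encoding its operands fit.
    // The __size template argument acts as a floor when a caller needs to
    // reserve at least a given width.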
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister exception, VirtualRegister thrownValue)
{
auto __metadataID = gen->addMetadataFor(opcodeID);
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, exception, thrownValue, __metadataID))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, exception, thrownValue, __metadataID))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, exception, thrownValue, __metadataID);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& exception, VirtualRegister& thrownValue, unsigned __metadataID)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<OpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(exception)
&& Fits<VirtualRegister, __size>::check(thrownValue)
&& Fits<unsigned, __size>::check(__metadataID)
&& (__size == OpcodeSize::Wide16 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister exception, VirtualRegister thrownValue, unsigned __metadataID)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, exception, thrownValue, __metadataID)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide32));
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(exception));
gen->write(Fits<VirtualRegister, __size>::convert(thrownValue));
gen->write(Fits<unsigned, __size>::convert(__metadataID));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**catch"[2 - __sizeShiftAmount]);
dumper->dumpOperand("exception", m_exception, true);
dumper->dumpOperand("thrownValue", m_thrownValue, false);
}
OpCatch(const uint8_t* stream)
: m_exception(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_thrownValue(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[1]))
, m_metadataID(Fits<unsigned, OpcodeSize::Narrow>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpCatch(const uint16_t* stream)
: m_exception(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_thrownValue(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[1]))
, m_metadataID(Fits<unsigned, OpcodeSize::Wide16>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpCatch(const uint32_t* stream)
: m_exception(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_thrownValue(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[1]))
, m_metadataID(Fits<unsigned, OpcodeSize::Wide32>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static OpCatch decode(const uint8_t* stream)
{
        // The stream pointer points at the first operand; the opcode byte (and any wide prefix) has already been consumed.
if (*stream == op_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == op_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setException(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setException<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setException<OpcodeSize::Wide16>(value, func);
else
setException<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setException(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setThrownValue(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setThrownValue<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setThrownValue<OpcodeSize::Wide16>(value, func);
else
setThrownValue<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setThrownValue(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
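    // Unlike most ops, op_catch's metadata holds a pointer to a buffer of value
    // profiles and virtual registers rather than an inline ValueProfile; this is
    // presumably used to profile the registers live at the catch handler.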
struct Metadata {
WTF_MAKE_NONCOPYABLE(Metadata);
public:
static constexpr OpcodeID opcodeID = op_catch;
Metadata(const OpCatch&) { }
ValueProfileAndVirtualRegisterBuffer* m_buffer;
};
Metadata& metadata(CodeBlock* codeBlock) const
{
return codeBlock->metadata<Metadata>(opcodeID, m_metadataID);
}
VirtualRegister m_exception;
VirtualRegister m_thrownValue;
unsigned m_metadataID;
};
struct OpToNumber : public Instruction {
static constexpr OpcodeID opcodeID = op_to_number;
static constexpr size_t length = 4;
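    // Converts operand to a Number (the ECMAScript ToNumber operation) into dst.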
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister operand)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, dst, operand);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister operand)
{
auto __metadataID = gen->addMetadataFor(opcodeID);
return emit<__size, BytecodeGenerator, shouldAssert>(gen, dst, operand, __metadataID);
}
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkWithoutMetadataID(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister operand)
{
decltype(gen->addMetadataFor(opcodeID)) __metadataID { };
return checkImpl<__size, BytecodeGenerator>(gen, dst, operand, __metadataID);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister operand, unsigned __metadataID)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, dst, operand, __metadataID);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister operand)
{
auto __metadataID = gen->addMetadataFor(opcodeID);
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, dst, operand, __metadataID))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, dst, operand, __metadataID))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, dst, operand, __metadataID);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& dst, VirtualRegister& operand, unsigned __metadataID)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<OpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(dst)
&& Fits<VirtualRegister, __size>::check(operand)
&& Fits<unsigned, __size>::check(__metadataID)
&& (__size == OpcodeSize::Wide16 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister operand, unsigned __metadataID)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, dst, operand, __metadataID)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide32));
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(dst));
gen->write(Fits<VirtualRegister, __size>::convert(operand));
gen->write(Fits<unsigned, __size>::convert(__metadataID));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**to_number"[2 - __sizeShiftAmount]);
dumper->dumpOperand("dst", m_dst, true);
dumper->dumpOperand("operand", m_operand, false);
}
OpToNumber(const uint8_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_operand(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[1]))
, m_metadataID(Fits<unsigned, OpcodeSize::Narrow>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpToNumber(const uint16_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_operand(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[1]))
, m_metadataID(Fits<unsigned, OpcodeSize::Wide16>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpToNumber(const uint32_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_operand(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[1]))
, m_metadataID(Fits<unsigned, OpcodeSize::Wide32>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static OpToNumber decode(const uint8_t* stream)
{
        // The stream pointer points at the first operand; the opcode byte (and any wide prefix) has already been consumed.
if (*stream == op_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == op_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setDst<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setDst<OpcodeSize::Wide16>(value, func);
else
setDst<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setOperand(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setOperand<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setOperand<OpcodeSize::Wide16>(value, func);
else
setOperand<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setOperand(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
struct Metadata {
WTF_MAKE_NONCOPYABLE(Metadata);
public:
static constexpr OpcodeID opcodeID = op_to_number;
Metadata(const OpToNumber&) { }
ValueProfile m_profile;
};
Metadata& metadata(CodeBlock* codeBlock) const
{
return codeBlock->metadata<Metadata>(opcodeID, m_metadataID);
}
VirtualRegister m_dst;
VirtualRegister m_operand;
unsigned m_metadataID;
};
struct OpToNumeric : public Instruction {
static constexpr OpcodeID opcodeID = op_to_numeric;
static constexpr size_t length = 4;
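    // Like op_to_number but implements ToNumeric: a BigInt operand passes through
    // unchanged instead of throwing, as ToNumber would for a BigInt.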
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister operand)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, dst, operand);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister operand)
{
auto __metadataID = gen->addMetadataFor(opcodeID);
return emit<__size, BytecodeGenerator, shouldAssert>(gen, dst, operand, __metadataID);
}
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkWithoutMetadataID(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister operand)
{
decltype(gen->addMetadataFor(opcodeID)) __metadataID { };
return checkImpl<__size, BytecodeGenerator>(gen, dst, operand, __metadataID);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister operand, unsigned __metadataID)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, dst, operand, __metadataID);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister operand)
{
auto __metadataID = gen->addMetadataFor(opcodeID);
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, dst, operand, __metadataID))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, dst, operand, __metadataID))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, dst, operand, __metadataID);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& dst, VirtualRegister& operand, unsigned __metadataID)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<OpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(dst)
&& Fits<VirtualRegister, __size>::check(operand)
&& Fits<unsigned, __size>::check(__metadataID)
&& (__size == OpcodeSize::Wide16 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister operand, unsigned __metadataID)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, dst, operand, __metadataID)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide32));
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(dst));
gen->write(Fits<VirtualRegister, __size>::convert(operand));
gen->write(Fits<unsigned, __size>::convert(__metadataID));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**to_numeric"[2 - __sizeShiftAmount]);
dumper->dumpOperand("dst", m_dst, true);
dumper->dumpOperand("operand", m_operand, false);
}
OpToNumeric(const uint8_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_operand(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[1]))
, m_metadataID(Fits<unsigned, OpcodeSize::Narrow>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpToNumeric(const uint16_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_operand(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[1]))
, m_metadataID(Fits<unsigned, OpcodeSize::Wide16>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpToNumeric(const uint32_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_operand(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[1]))
, m_metadataID(Fits<unsigned, OpcodeSize::Wide32>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static OpToNumeric decode(const uint8_t* stream)
{
        // The stream pointer points at the first operand; the opcode byte (and any wide prefix) has already been consumed.
if (*stream == op_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == op_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setDst<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setDst<OpcodeSize::Wide16>(value, func);
else
setDst<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setOperand(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setOperand<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setOperand<OpcodeSize::Wide16>(value, func);
else
setOperand<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setOperand(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
struct Metadata {
WTF_MAKE_NONCOPYABLE(Metadata);
public:
static constexpr OpcodeID opcodeID = op_to_numeric;
Metadata(const OpToNumeric&) { }
ValueProfile m_profile;
};
Metadata& metadata(CodeBlock* codeBlock) const
{
return codeBlock->metadata<Metadata>(opcodeID, m_metadataID);
}
VirtualRegister m_dst;
VirtualRegister m_operand;
unsigned m_metadataID;
};
struct OpNegate : public Instruction {
static constexpr OpcodeID opcodeID = op_negate;
static constexpr size_t length = 5;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister operand, ResultType resultType)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, dst, operand, resultType);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister operand, ResultType resultType)
{
auto __metadataID = gen->addMetadataFor(opcodeID);
return emit<__size, BytecodeGenerator, shouldAssert>(gen, dst, operand, resultType, __metadataID);
}
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkWithoutMetadataID(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister operand, ResultType resultType)
{
decltype(gen->addMetadataFor(opcodeID)) __metadataID { };
return checkImpl<__size, BytecodeGenerator>(gen, dst, operand, resultType, __metadataID);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister operand, ResultType resultType, unsigned __metadataID)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, dst, operand, resultType, __metadataID);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister operand, ResultType resultType)
{
auto __metadataID = gen->addMetadataFor(opcodeID);
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, dst, operand, resultType, __metadataID))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, dst, operand, resultType, __metadataID))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, dst, operand, resultType, __metadataID);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& dst, VirtualRegister& operand, ResultType& resultType, unsigned __metadataID)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<OpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(dst)
&& Fits<VirtualRegister, __size>::check(operand)
&& Fits<ResultType, __size>::check(resultType)
&& Fits<unsigned, __size>::check(__metadataID)
&& (__size == OpcodeSize::Wide16 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister operand, ResultType resultType, unsigned __metadataID)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, dst, operand, resultType, __metadataID)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide32));
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(dst));
gen->write(Fits<VirtualRegister, __size>::convert(operand));
gen->write(Fits<ResultType, __size>::convert(resultType));
gen->write(Fits<unsigned, __size>::convert(__metadataID));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**negate"[2 - __sizeShiftAmount]);
dumper->dumpOperand("dst", m_dst, true);
dumper->dumpOperand("operand", m_operand, false);
dumper->dumpOperand("resultType", m_resultType, false);
}
OpNegate(const uint8_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_operand(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[1]))
, m_resultType(Fits<ResultType, OpcodeSize::Narrow>::convert(stream[2]))
, m_metadataID(Fits<unsigned, OpcodeSize::Narrow>::convert(stream[3]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpNegate(const uint16_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_operand(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[1]))
, m_resultType(Fits<ResultType, OpcodeSize::Wide16>::convert(stream[2]))
, m_metadataID(Fits<unsigned, OpcodeSize::Wide16>::convert(stream[3]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpNegate(const uint32_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_operand(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[1]))
, m_resultType(Fits<ResultType, OpcodeSize::Wide32>::convert(stream[2]))
, m_metadataID(Fits<unsigned, OpcodeSize::Wide32>::convert(stream[3]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static OpNegate decode(const uint8_t* stream)
{
        // The stream pointer points at the first operand; the opcode byte (and any wide prefix) has already been consumed.
if (*stream == op_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == op_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setDst<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setDst<OpcodeSize::Wide16>(value, func);
else
setDst<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setOperand(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setOperand<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setOperand<OpcodeSize::Wide16>(value, func);
else
setOperand<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setOperand(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setResultType(ResultType value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setResultType<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setResultType<OpcodeSize::Wide16>(value, func);
else
setResultType<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setResultType(ResultType value, Functor func)
{
if (!Fits<ResultType, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 2 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<ResultType, size>::convert(value);
}
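    // UnaryArithProfile records the operand/result kinds observed at this negate
    // (int32 vs. double, etc.), presumably feeding the JITs' type speculation.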
struct Metadata {
WTF_MAKE_NONCOPYABLE(Metadata);
public:
static constexpr OpcodeID opcodeID = op_negate;
Metadata(const OpNegate&) { }
UnaryArithProfile m_arithProfile;
};
Metadata& metadata(CodeBlock* codeBlock) const
{
return codeBlock->metadata<Metadata>(opcodeID, m_metadataID);
}
VirtualRegister m_dst;
VirtualRegister m_operand;
ResultType m_resultType;
unsigned m_metadataID;
};
struct OpPutSetterByVal : public Instruction {
static constexpr OpcodeID opcodeID = op_put_setter_by_val;
static constexpr size_t length = 5;
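    // This opcode carries no out-of-line metadata: there is no m_metadataID, and
    // emit() writes the four operands directly without calling addMetadataFor.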
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister base, VirtualRegister property, unsigned attributes, VirtualRegister accessor)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, base, property, attributes, accessor);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister base, VirtualRegister property, unsigned attributes, VirtualRegister accessor)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, base, property, attributes, accessor);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister base, VirtualRegister property, unsigned attributes, VirtualRegister accessor)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, base, property, attributes, accessor))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, base, property, attributes, accessor))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, base, property, attributes, accessor);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& base, VirtualRegister& property, unsigned& attributes, VirtualRegister& accessor)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<OpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(base)
&& Fits<VirtualRegister, __size>::check(property)
&& Fits<unsigned, __size>::check(attributes)
&& Fits<VirtualRegister, __size>::check(accessor)
&& (__size == OpcodeSize::Wide16 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister base, VirtualRegister property, unsigned attributes, VirtualRegister accessor)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, base, property, attributes, accessor)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide32));
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(base));
gen->write(Fits<VirtualRegister, __size>::convert(property));
gen->write(Fits<unsigned, __size>::convert(attributes));
gen->write(Fits<VirtualRegister, __size>::convert(accessor));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**put_setter_by_val"[2 - __sizeShiftAmount]);
dumper->dumpOperand("base", m_base, true);
dumper->dumpOperand("property", m_property, false);
dumper->dumpOperand("attributes", m_attributes, false);
dumper->dumpOperand("accessor", m_accessor, false);
}
OpPutSetterByVal(const uint8_t* stream)
: m_base(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_property(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[1]))
, m_attributes(Fits<unsigned, OpcodeSize::Narrow>::convert(stream[2]))
, m_accessor(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[3]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpPutSetterByVal(const uint16_t* stream)
: m_base(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_property(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[1]))
, m_attributes(Fits<unsigned, OpcodeSize::Wide16>::convert(stream[2]))
, m_accessor(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[3]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpPutSetterByVal(const uint32_t* stream)
: m_base(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_property(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[1]))
, m_attributes(Fits<unsigned, OpcodeSize::Wide32>::convert(stream[2]))
, m_accessor(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[3]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static OpPutSetterByVal decode(const uint8_t* stream)
{
        // The stream pointer points at the first operand; the opcode byte (and any wide prefix) has already been consumed.
if (*stream == op_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == op_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setBase(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setBase<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setBase<OpcodeSize::Wide16>(value, func);
else
setBase<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setBase(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setProperty(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setProperty<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setProperty<OpcodeSize::Wide16>(value, func);
else
setProperty<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setProperty(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setAttributes(unsigned value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setAttributes<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setAttributes<OpcodeSize::Wide16>(value, func);
else
setAttributes<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setAttributes(unsigned value, Functor func)
{
if (!Fits<unsigned, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 2 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<unsigned, size>::convert(value);
}
template<typename Functor>
void setAccessor(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setAccessor<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setAccessor<OpcodeSize::Wide16>(value, func);
else
setAccessor<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setAccessor(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 3 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
VirtualRegister m_base;
VirtualRegister m_property;
unsigned m_attributes;
VirtualRegister m_accessor;
};
struct OpDefineDataProperty : public Instruction {
static constexpr OpcodeID opcodeID = op_define_data_property;
static constexpr size_t length = 5;
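    // Note that attributes here is a VirtualRegister (a runtime value), unlike
    // the immediate unsigned attributes operand of op_put_setter_by_val above.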
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister base, VirtualRegister property, VirtualRegister value, VirtualRegister attributes)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, base, property, value, attributes);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister base, VirtualRegister property, VirtualRegister value, VirtualRegister attributes)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, base, property, value, attributes);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister base, VirtualRegister property, VirtualRegister value, VirtualRegister attributes)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, base, property, value, attributes))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, base, property, value, attributes))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, base, property, value, attributes);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& base, VirtualRegister& property, VirtualRegister& value, VirtualRegister& attributes)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<OpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(base)
&& Fits<VirtualRegister, __size>::check(property)
&& Fits<VirtualRegister, __size>::check(value)
&& Fits<VirtualRegister, __size>::check(attributes)
&& (__size == OpcodeSize::Wide16 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister base, VirtualRegister property, VirtualRegister value, VirtualRegister attributes)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, base, property, value, attributes)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide32));
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(base));
gen->write(Fits<VirtualRegister, __size>::convert(property));
gen->write(Fits<VirtualRegister, __size>::convert(value));
gen->write(Fits<VirtualRegister, __size>::convert(attributes));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**define_data_property"[2 - __sizeShiftAmount]);
dumper->dumpOperand("base", m_base, true);
dumper->dumpOperand("property", m_property, false);
dumper->dumpOperand("value", m_value, false);
dumper->dumpOperand("attributes", m_attributes, false);
}
OpDefineDataProperty(const uint8_t* stream)
: m_base(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_property(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[1]))
, m_value(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[2]))
, m_attributes(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[3]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpDefineDataProperty(const uint16_t* stream)
: m_base(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_property(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[1]))
, m_value(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[2]))
, m_attributes(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[3]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpDefineDataProperty(const uint32_t* stream)
: m_base(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_property(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[1]))
, m_value(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[2]))
, m_attributes(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[3]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static OpDefineDataProperty decode(const uint8_t* stream)
{
        // The stream pointer points at the first operand; the opcode byte (and any wide prefix) has already been consumed.
if (*stream == op_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == op_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setBase(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setBase<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setBase<OpcodeSize::Wide16>(value, func);
else
setBase<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setBase(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setProperty(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setProperty<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setProperty<OpcodeSize::Wide16>(value, func);
else
setProperty<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setProperty(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setValue(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setValue<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setValue<OpcodeSize::Wide16>(value, func);
else
setValue<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setValue(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 2 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setAttributes(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setAttributes<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setAttributes<OpcodeSize::Wide16>(value, func);
else
setAttributes<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setAttributes(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 3 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
VirtualRegister m_base;
VirtualRegister m_property;
VirtualRegister m_value;
VirtualRegister m_attributes;
};
struct OpDefineAccessorProperty : public Instruction {
static constexpr OpcodeID opcodeID = op_define_accessor_property;
static constexpr size_t length = 6;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister base, VirtualRegister property, VirtualRegister getter, VirtualRegister setter, VirtualRegister attributes)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, base, property, getter, setter, attributes);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister base, VirtualRegister property, VirtualRegister getter, VirtualRegister setter, VirtualRegister attributes)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, base, property, getter, setter, attributes);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister base, VirtualRegister property, VirtualRegister getter, VirtualRegister setter, VirtualRegister attributes)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, base, property, getter, setter, attributes))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, base, property, getter, setter, attributes))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, base, property, getter, setter, attributes);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& base, VirtualRegister& property, VirtualRegister& getter, VirtualRegister& setter, VirtualRegister& attributes)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<OpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(base)
&& Fits<VirtualRegister, __size>::check(property)
&& Fits<VirtualRegister, __size>::check(getter)
&& Fits<VirtualRegister, __size>::check(setter)
&& Fits<VirtualRegister, __size>::check(attributes)
&& (__size == OpcodeSize::Wide16 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister base, VirtualRegister property, VirtualRegister getter, VirtualRegister setter, VirtualRegister attributes)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, base, property, getter, setter, attributes)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide32));
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(base));
gen->write(Fits<VirtualRegister, __size>::convert(property));
gen->write(Fits<VirtualRegister, __size>::convert(getter));
gen->write(Fits<VirtualRegister, __size>::convert(setter));
gen->write(Fits<VirtualRegister, __size>::convert(attributes));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**define_accessor_property"[2 - __sizeShiftAmount]);
dumper->dumpOperand("base", m_base, true);
dumper->dumpOperand("property", m_property, false);
dumper->dumpOperand("getter", m_getter, false);
dumper->dumpOperand("setter", m_setter, false);
dumper->dumpOperand("attributes", m_attributes, false);
}
OpDefineAccessorProperty(const uint8_t* stream)
: m_base(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_property(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[1]))
, m_getter(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[2]))
, m_setter(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[3]))
, m_attributes(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[4]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpDefineAccessorProperty(const uint16_t* stream)
: m_base(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_property(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[1]))
, m_getter(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[2]))
, m_setter(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[3]))
, m_attributes(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[4]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpDefineAccessorProperty(const uint32_t* stream)
: m_base(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_property(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[1]))
, m_getter(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[2]))
, m_setter(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[3]))
, m_attributes(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[4]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static OpDefineAccessorProperty decode(const uint8_t* stream)
{
        // The stream pointer points at the first operand; the opcode byte (and any wide prefix) has already been consumed.
if (*stream == op_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == op_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setBase(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setBase<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setBase<OpcodeSize::Wide16>(value, func);
else
setBase<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setBase(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setProperty(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setProperty<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setProperty<OpcodeSize::Wide16>(value, func);
else
setProperty<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setProperty(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setGetter(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setGetter<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setGetter<OpcodeSize::Wide16>(value, func);
else
setGetter<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setGetter(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 2 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setSetter(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setSetter<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setSetter<OpcodeSize::Wide16>(value, func);
else
setSetter<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setSetter(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 3 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setAttributes(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setAttributes<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setAttributes<OpcodeSize::Wide16>(value, func);
else
setAttributes<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setAttributes(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 4 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
VirtualRegister m_base;
VirtualRegister m_property;
VirtualRegister m_getter;
VirtualRegister m_setter;
VirtualRegister m_attributes;
};
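// Operand layout, as used by the setters above: operand i lives at byte offset
// (i * size) + PaddingBySize<size>::value + 1 from the start of the
// instruction, where PaddingBySize covers the one-byte wide prefix when
// present and the trailing + 1 skips the opcode byte itself. The Functor
// argument to each setter is a fallback, invoked only when the new value does
// not fit the operand width the instruction was emitted with; it must return
// a value that does. A minimal sketch, assuming `op` points at the live
// instruction bytes in the stream and `fallbackRegister` is a register known
// to encode (both names are hypothetical):
//
//     op->setBase(newBase, [&]() -> VirtualRegister {
//         // Reached only if newBase overflows the current encoding.
//         return fallbackRegister;
//     });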
struct OpJmp : public Instruction {
static constexpr OpcodeID opcodeID = op_jmp;
static constexpr size_t length = 2;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, BoundLabel targetLabel)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, targetLabel);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, BoundLabel targetLabel)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, targetLabel);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, BoundLabel targetLabel)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, targetLabel))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, targetLabel))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, targetLabel);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, BoundLabel& targetLabel)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<OpcodeID, __size>::check(opcodeID)
&& Fits<BoundLabel, __size>::check(targetLabel)
&& (__size == OpcodeSize::Wide16 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, BoundLabel targetLabel)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, targetLabel)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide32));
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<BoundLabel, __size>::convert(targetLabel));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**jmp"[2 - __sizeShiftAmount]);
dumper->dumpOperand("targetLabel", m_targetLabel, true);
}
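    // __sizeShiftAmount above is log2 of the operand width (0 narrow,
    // 1 wide16, 2 wide32), so &"**jmp"[2 - __sizeShiftAmount] selects the
    // dumped mnemonic:
    //
    //     0 -> "jmp"     1 -> "*jmp"     2 -> "**jmp"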
OpJmp(const uint8_t* stream)
: m_targetLabel(Fits<BoundLabel, OpcodeSize::Narrow>::convert(stream[0]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpJmp(const uint16_t* stream)
: m_targetLabel(Fits<BoundLabel, OpcodeSize::Wide16>::convert(stream[0]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpJmp(const uint32_t* stream)
: m_targetLabel(Fits<BoundLabel, OpcodeSize::Wide32>::convert(stream[0]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static OpJmp decode(const uint8_t* stream)
{
        // The constructors receive a pointer to the first operand; the opcode byte and any wide prefix have already been skipped.
if (*stream == op_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == op_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setTargetLabel(BoundLabel value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setTargetLabel<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setTargetLabel<OpcodeSize::Wide16>(value, func);
else
setTargetLabel<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setTargetLabel(BoundLabel value, Functor func)
{
if (!Fits<BoundLabel, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<BoundLabel, size>::convert(value);
}
BoundLabel m_targetLabel;
};
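// Emission walks the sizes in order: emitWithSmallestSizeRequirement() tries
// Narrow, then Wide16, then Wide32, and emitImpl() writes a wide form as a
// one-byte op_wide16/op_wide32 prefix, the one-byte opcode, then each operand
// at the widened size (alignWideOpcode16/32 first pads, on platforms that
// need it, so the operands land aligned). A minimal sketch, assuming `gen` is
// a live BytecodeGenerator* and `label` a BoundLabel obtained from it (names
// are illustrative):
//
//     OpJmp::emit(gen, label);                     // narrowest form that fits
//     OpJmp::emit<OpcodeSize::Wide32>(gen, label); // force the 32-bit form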
struct OpJtrue : public Instruction {
static constexpr OpcodeID opcodeID = op_jtrue;
static constexpr size_t length = 3;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister condition, BoundLabel targetLabel)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, condition, targetLabel);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister condition, BoundLabel targetLabel)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, condition, targetLabel);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister condition, BoundLabel targetLabel)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, condition, targetLabel))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, condition, targetLabel))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, condition, targetLabel);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& condition, BoundLabel& targetLabel)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<OpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(condition)
&& Fits<BoundLabel, __size>::check(targetLabel)
&& (__size == OpcodeSize::Wide16 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister condition, BoundLabel targetLabel)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, condition, targetLabel)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide32));
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(condition));
gen->write(Fits<BoundLabel, __size>::convert(targetLabel));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**jtrue"[2 - __sizeShiftAmount]);
dumper->dumpOperand("condition", m_condition, true);
dumper->dumpOperand("targetLabel", m_targetLabel, false);
}
OpJtrue(const uint8_t* stream)
: m_condition(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_targetLabel(Fits<BoundLabel, OpcodeSize::Narrow>::convert(stream[1]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpJtrue(const uint16_t* stream)
: m_condition(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_targetLabel(Fits<BoundLabel, OpcodeSize::Wide16>::convert(stream[1]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpJtrue(const uint32_t* stream)
: m_condition(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_targetLabel(Fits<BoundLabel, OpcodeSize::Wide32>::convert(stream[1]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static OpJtrue decode(const uint8_t* stream)
{
        // The constructors receive a pointer to the first operand; the opcode byte and any wide prefix have already been skipped.
if (*stream == op_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == op_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setCondition(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setCondition<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setCondition<OpcodeSize::Wide16>(value, func);
else
setCondition<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setCondition(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setTargetLabel(BoundLabel value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setTargetLabel<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setTargetLabel<OpcodeSize::Wide16>(value, func);
else
setTargetLabel<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setTargetLabel(BoundLabel value, Functor func)
{
if (!Fits<BoundLabel, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<BoundLabel, size>::convert(value);
}
VirtualRegister m_condition;
BoundLabel m_targetLabel;
};
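// Decoding, sketched end to end, assuming `pc` points at the first byte of a
// jtrue instruction (the wide prefix when one is present; the name is
// illustrative):
//
//     OpJtrue op = OpJtrue::decode(pc);  // handles narrow, wide16 and wide32
//     VirtualRegister condition = op.m_condition;
//     BoundLabel target = op.m_targetLabel;
//
// decode() inspects that first byte for an op_wide16/op_wide32 prefix and
// hands the constructor a pointer already advanced to the first operand,
// which is what the stream[-1] assertion double-checks.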
struct OpJfalse : public Instruction {
static constexpr OpcodeID opcodeID = op_jfalse;
static constexpr size_t length = 3;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister condition, BoundLabel targetLabel)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, condition, targetLabel);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister condition, BoundLabel targetLabel)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, condition, targetLabel);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister condition, BoundLabel targetLabel)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, condition, targetLabel))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, condition, targetLabel))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, condition, targetLabel);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& condition, BoundLabel& targetLabel)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<OpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(condition)
&& Fits<BoundLabel, __size>::check(targetLabel)
&& (__size == OpcodeSize::Wide16 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister condition, BoundLabel targetLabel)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, condition, targetLabel)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide32));
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(condition));
gen->write(Fits<BoundLabel, __size>::convert(targetLabel));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**jfalse"[2 - __sizeShiftAmount]);
dumper->dumpOperand("condition", m_condition, true);
dumper->dumpOperand("targetLabel", m_targetLabel, false);
}
OpJfalse(const uint8_t* stream)
: m_condition(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_targetLabel(Fits<BoundLabel, OpcodeSize::Narrow>::convert(stream[1]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpJfalse(const uint16_t* stream)
: m_condition(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_targetLabel(Fits<BoundLabel, OpcodeSize::Wide16>::convert(stream[1]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpJfalse(const uint32_t* stream)
: m_condition(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_targetLabel(Fits<BoundLabel, OpcodeSize::Wide32>::convert(stream[1]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static OpJfalse decode(const uint8_t* stream)
{
        // The constructors receive a pointer to the first operand; the opcode byte and any wide prefix have already been skipped.
if (*stream == op_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == op_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setCondition(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setCondition<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setCondition<OpcodeSize::Wide16>(value, func);
else
setCondition<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setCondition(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setTargetLabel(BoundLabel value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setTargetLabel<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setTargetLabel<OpcodeSize::Wide16>(value, func);
else
setTargetLabel<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setTargetLabel(BoundLabel value, Functor func)
{
if (!Fits<BoundLabel, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<BoundLabel, size>::convert(value);
}
VirtualRegister m_condition;
BoundLabel m_targetLabel;
};
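// Jump targets are typically rebound after the destination label is known;
// setTargetLabel() rewrites the operand in place at the width the instruction
// was emitted with, and the functor supplies a replacement when the new
// offset does not fit that width. A sketch, assuming `op` points at the live
// instruction bytes in the stream and outOfLineTarget() is a hypothetical
// helper yielding an encodable label:
//
//     op->setTargetLabel(BoundLabel(offset), [&]() -> BoundLabel {
//         // Reached only if `offset` overflows the emitted operand width.
//         return outOfLineTarget(offset);
//     });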
struct OpJeqNull : public Instruction {
static constexpr OpcodeID opcodeID = op_jeq_null;
static constexpr size_t length = 3;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister value, BoundLabel targetLabel)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, value, targetLabel);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister value, BoundLabel targetLabel)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, value, targetLabel);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister value, BoundLabel targetLabel)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, value, targetLabel))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, value, targetLabel))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, value, targetLabel);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& value, BoundLabel& targetLabel)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<OpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(value)
&& Fits<BoundLabel, __size>::check(targetLabel)
&& (__size == OpcodeSize::Wide16 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister value, BoundLabel targetLabel)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, value, targetLabel)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide32));
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(value));
gen->write(Fits<BoundLabel, __size>::convert(targetLabel));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**jeq_null"[2 - __sizeShiftAmount]);
dumper->dumpOperand("value", m_value, true);
dumper->dumpOperand("targetLabel", m_targetLabel, false);
}
OpJeqNull(const uint8_t* stream)
: m_value(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_targetLabel(Fits<BoundLabel, OpcodeSize::Narrow>::convert(stream[1]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpJeqNull(const uint16_t* stream)
: m_value(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_targetLabel(Fits<BoundLabel, OpcodeSize::Wide16>::convert(stream[1]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpJeqNull(const uint32_t* stream)
: m_value(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_targetLabel(Fits<BoundLabel, OpcodeSize::Wide32>::convert(stream[1]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static OpJeqNull decode(const uint8_t* stream)
{
        // The constructors receive a pointer to the first operand; the opcode byte and any wide prefix have already been skipped.
if (*stream == op_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == op_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setValue(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setValue<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setValue<OpcodeSize::Wide16>(value, func);
else
setValue<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setValue(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setTargetLabel(BoundLabel value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setTargetLabel<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setTargetLabel<OpcodeSize::Wide16>(value, func);
else
setTargetLabel<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setTargetLabel(BoundLabel value, Functor func)
{
if (!Fits<BoundLabel, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<BoundLabel, size>::convert(value);
}
VirtualRegister m_value;
BoundLabel m_targetLabel;
};
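// checkImpl() is the single admissibility test for an encoding width across
// this family: the opcode, every operand, and (for wide forms) the prefix
// byte must all pass their Fits<...>::check, and wide16 is refused outright
// on the Windows CLoop (see the FIXME above, bug 198283). For example, a
// narrow jeq_null encodes only if, in addition to the opcode check, both of
// these hold:
//
//     Fits<VirtualRegister, OpcodeSize::Narrow>::check(value)
//     Fits<BoundLabel, OpcodeSize::Narrow>::check(targetLabel)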
struct OpJneqNull : public Instruction {
static constexpr OpcodeID opcodeID = op_jneq_null;
static constexpr size_t length = 3;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister value, BoundLabel targetLabel)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, value, targetLabel);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister value, BoundLabel targetLabel)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, value, targetLabel);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister value, BoundLabel targetLabel)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, value, targetLabel))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, value, targetLabel))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, value, targetLabel);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& value, BoundLabel& targetLabel)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<OpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(value)
&& Fits<BoundLabel, __size>::check(targetLabel)
&& (__size == OpcodeSize::Wide16 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister value, BoundLabel targetLabel)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, value, targetLabel)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide32));
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(value));
gen->write(Fits<BoundLabel, __size>::convert(targetLabel));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**jneq_null"[2 - __sizeShiftAmount]);
dumper->dumpOperand("value", m_value, true);
dumper->dumpOperand("targetLabel", m_targetLabel, false);
}
OpJneqNull(const uint8_t* stream)
: m_value(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_targetLabel(Fits<BoundLabel, OpcodeSize::Narrow>::convert(stream[1]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpJneqNull(const uint16_t* stream)
: m_value(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_targetLabel(Fits<BoundLabel, OpcodeSize::Wide16>::convert(stream[1]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpJneqNull(const uint32_t* stream)
: m_value(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_targetLabel(Fits<BoundLabel, OpcodeSize::Wide32>::convert(stream[1]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static OpJneqNull decode(const uint8_t* stream)
{
        // The constructors receive a pointer to the first operand; the opcode byte and any wide prefix have already been skipped.
if (*stream == op_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == op_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setValue(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setValue<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setValue<OpcodeSize::Wide16>(value, func);
else
setValue<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setValue(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setTargetLabel(BoundLabel value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setTargetLabel<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setTargetLabel<OpcodeSize::Wide16>(value, func);
else
setTargetLabel<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setTargetLabel(BoundLabel value, Functor func)
{
if (!Fits<BoundLabel, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<BoundLabel, size>::convert(value);
}
VirtualRegister m_value;
BoundLabel m_targetLabel;
};
struct OpJundefinedOrNull : public Instruction {
static constexpr OpcodeID opcodeID = op_jundefined_or_null;
static constexpr size_t length = 3;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister value, BoundLabel targetLabel)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, value, targetLabel);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister value, BoundLabel targetLabel)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, value, targetLabel);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister value, BoundLabel targetLabel)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, value, targetLabel))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, value, targetLabel))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, value, targetLabel);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& value, BoundLabel& targetLabel)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<OpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(value)
&& Fits<BoundLabel, __size>::check(targetLabel)
&& (__size == OpcodeSize::Wide16 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister value, BoundLabel targetLabel)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, value, targetLabel)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide32));
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(value));
gen->write(Fits<BoundLabel, __size>::convert(targetLabel));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**jundefined_or_null"[2 - __sizeShiftAmount]);
dumper->dumpOperand("value", m_value, true);
dumper->dumpOperand("targetLabel", m_targetLabel, false);
}
OpJundefinedOrNull(const uint8_t* stream)
: m_value(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_targetLabel(Fits<BoundLabel, OpcodeSize::Narrow>::convert(stream[1]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpJundefinedOrNull(const uint16_t* stream)
: m_value(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_targetLabel(Fits<BoundLabel, OpcodeSize::Wide16>::convert(stream[1]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpJundefinedOrNull(const uint32_t* stream)
: m_value(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_targetLabel(Fits<BoundLabel, OpcodeSize::Wide32>::convert(stream[1]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static OpJundefinedOrNull decode(const uint8_t* stream)
{
        // The constructors receive a pointer to the first operand; the opcode byte and any wide prefix have already been skipped.
if (*stream == op_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == op_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setValue(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setValue<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setValue<OpcodeSize::Wide16>(value, func);
else
setValue<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setValue(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setTargetLabel(BoundLabel value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setTargetLabel<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setTargetLabel<OpcodeSize::Wide16>(value, func);
else
setTargetLabel<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setTargetLabel(BoundLabel value, Functor func)
{
if (!Fits<BoundLabel, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<BoundLabel, size>::convert(value);
}
VirtualRegister m_value;
BoundLabel m_targetLabel;
};
struct OpJnundefinedOrNull : public Instruction {
static constexpr OpcodeID opcodeID = op_jnundefined_or_null;
static constexpr size_t length = 3;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister value, BoundLabel targetLabel)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, value, targetLabel);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister value, BoundLabel targetLabel)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, value, targetLabel);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister value, BoundLabel targetLabel)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, value, targetLabel))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, value, targetLabel))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, value, targetLabel);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& value, BoundLabel& targetLabel)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<OpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(value)
&& Fits<BoundLabel, __size>::check(targetLabel)
&& (__size == OpcodeSize::Wide16 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister value, BoundLabel targetLabel)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, value, targetLabel)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide32));
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(value));
gen->write(Fits<BoundLabel, __size>::convert(targetLabel));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**jnundefined_or_null"[2 - __sizeShiftAmount]);
dumper->dumpOperand("value", m_value, true);
dumper->dumpOperand("targetLabel", m_targetLabel, false);
}
OpJnundefinedOrNull(const uint8_t* stream)
: m_value(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_targetLabel(Fits<BoundLabel, OpcodeSize::Narrow>::convert(stream[1]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpJnundefinedOrNull(const uint16_t* stream)
: m_value(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_targetLabel(Fits<BoundLabel, OpcodeSize::Wide16>::convert(stream[1]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpJnundefinedOrNull(const uint32_t* stream)
: m_value(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_targetLabel(Fits<BoundLabel, OpcodeSize::Wide32>::convert(stream[1]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static OpJnundefinedOrNull decode(const uint8_t* stream)
{
        // The constructors receive a pointer to the first operand; the opcode byte and any wide prefix have already been skipped.
if (*stream == op_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == op_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setValue(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setValue<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setValue<OpcodeSize::Wide16>(value, func);
else
setValue<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setValue(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setTargetLabel(BoundLabel value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setTargetLabel<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setTargetLabel<OpcodeSize::Wide16>(value, func);
else
setTargetLabel<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setTargetLabel(BoundLabel value, Functor func)
{
if (!Fits<BoundLabel, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<BoundLabel, size>::convert(value);
}
VirtualRegister m_value;
BoundLabel m_targetLabel;
};
struct OpJeq : public Instruction {
static constexpr OpcodeID opcodeID = op_jeq;
static constexpr size_t length = 4;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister lhs, VirtualRegister rhs, BoundLabel targetLabel)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, lhs, rhs, targetLabel);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister lhs, VirtualRegister rhs, BoundLabel targetLabel)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, lhs, rhs, targetLabel);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister lhs, VirtualRegister rhs, BoundLabel targetLabel)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, lhs, rhs, targetLabel))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, lhs, rhs, targetLabel))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, lhs, rhs, targetLabel);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& lhs, VirtualRegister& rhs, BoundLabel& targetLabel)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<OpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(lhs)
&& Fits<VirtualRegister, __size>::check(rhs)
&& Fits<BoundLabel, __size>::check(targetLabel)
&& (__size == OpcodeSize::Wide16 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister lhs, VirtualRegister rhs, BoundLabel targetLabel)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, lhs, rhs, targetLabel)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide32));
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(lhs));
gen->write(Fits<VirtualRegister, __size>::convert(rhs));
gen->write(Fits<BoundLabel, __size>::convert(targetLabel));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**jeq"[2 - __sizeShiftAmount]);
dumper->dumpOperand("lhs", m_lhs, true);
dumper->dumpOperand("rhs", m_rhs, false);
dumper->dumpOperand("targetLabel", m_targetLabel, false);
}
OpJeq(const uint8_t* stream)
: m_lhs(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_rhs(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[1]))
, m_targetLabel(Fits<BoundLabel, OpcodeSize::Narrow>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpJeq(const uint16_t* stream)
: m_lhs(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_rhs(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[1]))
, m_targetLabel(Fits<BoundLabel, OpcodeSize::Wide16>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpJeq(const uint32_t* stream)
: m_lhs(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_rhs(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[1]))
, m_targetLabel(Fits<BoundLabel, OpcodeSize::Wide32>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static OpJeq decode(const uint8_t* stream)
{
        // The constructors receive a pointer to the first operand; the opcode byte and any wide prefix have already been skipped.
if (*stream == op_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == op_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setLhs(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setLhs<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setLhs<OpcodeSize::Wide16>(value, func);
else
setLhs<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setLhs(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setRhs(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setRhs<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setRhs<OpcodeSize::Wide16>(value, func);
else
setRhs<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setRhs(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setTargetLabel(BoundLabel value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setTargetLabel<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setTargetLabel<OpcodeSize::Wide16>(value, func);
else
setTargetLabel<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setTargetLabel(BoundLabel value, Functor func)
{
if (!Fits<BoundLabel, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 2 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<BoundLabel, size>::convert(value);
}
VirtualRegister m_lhs;
VirtualRegister m_rhs;
BoundLabel m_targetLabel;
};
struct OpJstricteq : public Instruction {
static constexpr OpcodeID opcodeID = op_jstricteq;
static constexpr size_t length = 4;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister lhs, VirtualRegister rhs, BoundLabel targetLabel)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, lhs, rhs, targetLabel);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister lhs, VirtualRegister rhs, BoundLabel targetLabel)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, lhs, rhs, targetLabel);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister lhs, VirtualRegister rhs, BoundLabel targetLabel)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, lhs, rhs, targetLabel))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, lhs, rhs, targetLabel))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, lhs, rhs, targetLabel);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& lhs, VirtualRegister& rhs, BoundLabel& targetLabel)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<OpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(lhs)
&& Fits<VirtualRegister, __size>::check(rhs)
&& Fits<BoundLabel, __size>::check(targetLabel)
&& (__size == OpcodeSize::Wide16 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister lhs, VirtualRegister rhs, BoundLabel targetLabel)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, lhs, rhs, targetLabel)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide32));
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(lhs));
gen->write(Fits<VirtualRegister, __size>::convert(rhs));
gen->write(Fits<BoundLabel, __size>::convert(targetLabel));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**jstricteq"[2 - __sizeShiftAmount]);
dumper->dumpOperand("lhs", m_lhs, true);
dumper->dumpOperand("rhs", m_rhs, false);
dumper->dumpOperand("targetLabel", m_targetLabel, false);
}
OpJstricteq(const uint8_t* stream)
: m_lhs(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_rhs(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[1]))
, m_targetLabel(Fits<BoundLabel, OpcodeSize::Narrow>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpJstricteq(const uint16_t* stream)
: m_lhs(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_rhs(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[1]))
, m_targetLabel(Fits<BoundLabel, OpcodeSize::Wide16>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpJstricteq(const uint32_t* stream)
: m_lhs(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_rhs(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[1]))
, m_targetLabel(Fits<BoundLabel, OpcodeSize::Wide32>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static OpJstricteq decode(const uint8_t* stream)
{
        // The constructors receive a pointer to the first operand; the opcode byte and any wide prefix have already been skipped.
if (*stream == op_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == op_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setLhs(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setLhs<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setLhs<OpcodeSize::Wide16>(value, func);
else
setLhs<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setLhs(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setRhs(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setRhs<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setRhs<OpcodeSize::Wide16>(value, func);
else
setRhs<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setRhs(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setTargetLabel(BoundLabel value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setTargetLabel<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setTargetLabel<OpcodeSize::Wide16>(value, func);
else
setTargetLabel<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setTargetLabel(BoundLabel value, Functor func)
{
if (!Fits<BoundLabel, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 2 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<BoundLabel, size>::convert(value);
}
VirtualRegister m_lhs;
VirtualRegister m_rhs;
BoundLabel m_targetLabel;
};
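// OpJeq, OpJstricteq and OpJneq (below) share the same three-operand shape
// (lhs, rhs, targetLabel) and differ only in opcodeID and dumped mnemonic;
// the comparison semantics live in the interpreter and JITs, not here.
// Emitting a strict-equality branch, sketched with illustrative names:
//
//     OpJstricteq::emit(gen, lhs, rhs, consequentLabel);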
struct OpJneq : public Instruction {
static constexpr OpcodeID opcodeID = op_jneq;
static constexpr size_t length = 4;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister lhs, VirtualRegister rhs, BoundLabel targetLabel)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, lhs, rhs, targetLabel);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister lhs, VirtualRegister rhs, BoundLabel targetLabel)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, lhs, rhs, targetLabel);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister lhs, VirtualRegister rhs, BoundLabel targetLabel)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, lhs, rhs, targetLabel))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, lhs, rhs, targetLabel))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, lhs, rhs, targetLabel);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& lhs, VirtualRegister& rhs, BoundLabel& targetLabel)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<OpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(lhs)
&& Fits<VirtualRegister, __size>::check(rhs)
&& Fits<BoundLabel, __size>::check(targetLabel)
&& (__size == OpcodeSize::Wide16 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister lhs, VirtualRegister rhs, BoundLabel targetLabel)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, lhs, rhs, targetLabel)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide32));
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(lhs));
gen->write(Fits<VirtualRegister, __size>::convert(rhs));
gen->write(Fits<BoundLabel, __size>::convert(targetLabel));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
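// Index into the "**jneq" literal so that zero, one, or two leading '*'s
// mark the narrow, wide16, and wide32 encodings respectively.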
dumper->printLocationAndOp(__location, &"**jneq"[2 - __sizeShiftAmount]);
dumper->dumpOperand("lhs", m_lhs, true);
dumper->dumpOperand("rhs", m_rhs, false);
dumper->dumpOperand("targetLabel", m_targetLabel, false);
}
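// Each constructor takes a pointer to the first operand at the matching
// width; the assertion checks that the byte just before it is this opcode.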
OpJneq(const uint8_t* stream)
: m_lhs(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_rhs(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[1]))
, m_targetLabel(Fits<BoundLabel, OpcodeSize::Narrow>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpJneq(const uint16_t* stream)
: m_lhs(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_rhs(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[1]))
, m_targetLabel(Fits<BoundLabel, OpcodeSize::Wide16>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpJneq(const uint32_t* stream)
: m_lhs(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_rhs(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[1]))
, m_targetLabel(Fits<BoundLabel, OpcodeSize::Wide32>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static OpJneq decode(const uint8_t* stream)
{
// Hand the constructor a pointer to the first operand, skipping the opcode byte and, for wide encodings, the prefix byte.
if (*stream == op_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == op_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
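// The setters below patch an operand of an already-emitted instruction in
// place, at whatever width the instruction was encoded with; if the new value
// does not fit that width, the functor supplies a substitute value.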
template<typename Functor>
void setLhs(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setLhs<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setLhs<OpcodeSize::Wide16>(value, func);
else
setLhs<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setLhs(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
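// Operand i starts i * size bytes past the one-byte opcode and, for wide
// encodings, the one-byte prefix (PaddingBySize<size>::value).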
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setRhs(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setRhs<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setRhs<OpcodeSize::Wide16>(value, func);
else
setRhs<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setRhs(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setTargetLabel(BoundLabel value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setTargetLabel<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setTargetLabel<OpcodeSize::Wide16>(value, func);
else
setTargetLabel<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setTargetLabel(BoundLabel value, Functor func)
{
if (!Fits<BoundLabel, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 2 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<BoundLabel, size>::convert(value);
}
VirtualRegister m_lhs;
VirtualRegister m_rhs;
BoundLabel m_targetLabel;
};
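// The remaining comparison jumps (OpJnstricteq, OpJless, OpJlesseq, OpJgreater,
// OpJgreatereq, OpJnless, OpJnlesseq, OpJngreater, OpJngreatereq) are generated
// from the same template as OpJneq and differ only in their opcode; the notes
// above apply to each of them.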
struct OpJnstricteq : public Instruction {
static constexpr OpcodeID opcodeID = op_jnstricteq;
static constexpr size_t length = 4;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister lhs, VirtualRegister rhs, BoundLabel targetLabel)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, lhs, rhs, targetLabel);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister lhs, VirtualRegister rhs, BoundLabel targetLabel)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, lhs, rhs, targetLabel);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister lhs, VirtualRegister rhs, BoundLabel targetLabel)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, lhs, rhs, targetLabel))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, lhs, rhs, targetLabel))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, lhs, rhs, targetLabel);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& lhs, VirtualRegister& rhs, BoundLabel& targetLabel)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<OpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(lhs)
&& Fits<VirtualRegister, __size>::check(rhs)
&& Fits<BoundLabel, __size>::check(targetLabel)
&& (__size == OpcodeSize::Wide16 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister lhs, VirtualRegister rhs, BoundLabel targetLabel)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, lhs, rhs, targetLabel)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide32));
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(lhs));
gen->write(Fits<VirtualRegister, __size>::convert(rhs));
gen->write(Fits<BoundLabel, __size>::convert(targetLabel));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**jnstricteq"[2 - __sizeShiftAmount]);
dumper->dumpOperand("lhs", m_lhs, true);
dumper->dumpOperand("rhs", m_rhs, false);
dumper->dumpOperand("targetLabel", m_targetLabel, false);
}
OpJnstricteq(const uint8_t* stream)
: m_lhs(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_rhs(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[1]))
, m_targetLabel(Fits<BoundLabel, OpcodeSize::Narrow>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpJnstricteq(const uint16_t* stream)
: m_lhs(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_rhs(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[1]))
, m_targetLabel(Fits<BoundLabel, OpcodeSize::Wide16>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpJnstricteq(const uint32_t* stream)
: m_lhs(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_rhs(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[1]))
, m_targetLabel(Fits<BoundLabel, OpcodeSize::Wide32>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static OpJnstricteq decode(const uint8_t* stream)
{
// Hand the constructor a pointer to the first operand, skipping the opcode byte and, for wide encodings, the prefix byte.
if (*stream == op_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == op_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setLhs(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setLhs<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setLhs<OpcodeSize::Wide16>(value, func);
else
setLhs<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setLhs(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setRhs(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setRhs<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setRhs<OpcodeSize::Wide16>(value, func);
else
setRhs<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setRhs(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setTargetLabel(BoundLabel value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setTargetLabel<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setTargetLabel<OpcodeSize::Wide16>(value, func);
else
setTargetLabel<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setTargetLabel(BoundLabel value, Functor func)
{
if (!Fits<BoundLabel, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 2 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<BoundLabel, size>::convert(value);
}
VirtualRegister m_lhs;
VirtualRegister m_rhs;
BoundLabel m_targetLabel;
};
struct OpJless : public Instruction {
static constexpr OpcodeID opcodeID = op_jless;
static constexpr size_t length = 4;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister lhs, VirtualRegister rhs, BoundLabel targetLabel)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, lhs, rhs, targetLabel);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister lhs, VirtualRegister rhs, BoundLabel targetLabel)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, lhs, rhs, targetLabel);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister lhs, VirtualRegister rhs, BoundLabel targetLabel)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, lhs, rhs, targetLabel))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, lhs, rhs, targetLabel))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, lhs, rhs, targetLabel);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& lhs, VirtualRegister& rhs, BoundLabel& targetLabel)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<OpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(lhs)
&& Fits<VirtualRegister, __size>::check(rhs)
&& Fits<BoundLabel, __size>::check(targetLabel)
&& (__size == OpcodeSize::Wide16 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister lhs, VirtualRegister rhs, BoundLabel targetLabel)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, lhs, rhs, targetLabel)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide32));
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(lhs));
gen->write(Fits<VirtualRegister, __size>::convert(rhs));
gen->write(Fits<BoundLabel, __size>::convert(targetLabel));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**jless"[2 - __sizeShiftAmount]);
dumper->dumpOperand("lhs", m_lhs, true);
dumper->dumpOperand("rhs", m_rhs, false);
dumper->dumpOperand("targetLabel", m_targetLabel, false);
}
OpJless(const uint8_t* stream)
: m_lhs(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_rhs(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[1]))
, m_targetLabel(Fits<BoundLabel, OpcodeSize::Narrow>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpJless(const uint16_t* stream)
: m_lhs(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_rhs(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[1]))
, m_targetLabel(Fits<BoundLabel, OpcodeSize::Wide16>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpJless(const uint32_t* stream)
: m_lhs(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_rhs(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[1]))
, m_targetLabel(Fits<BoundLabel, OpcodeSize::Wide32>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static OpJless decode(const uint8_t* stream)
{
// Hand the constructor a pointer to the first operand, skipping the opcode byte and, for wide encodings, the prefix byte.
if (*stream == op_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == op_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setLhs(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setLhs<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setLhs<OpcodeSize::Wide16>(value, func);
else
setLhs<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setLhs(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setRhs(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setRhs<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setRhs<OpcodeSize::Wide16>(value, func);
else
setRhs<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setRhs(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setTargetLabel(BoundLabel value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setTargetLabel<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setTargetLabel<OpcodeSize::Wide16>(value, func);
else
setTargetLabel<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setTargetLabel(BoundLabel value, Functor func)
{
if (!Fits<BoundLabel, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 2 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<BoundLabel, size>::convert(value);
}
VirtualRegister m_lhs;
VirtualRegister m_rhs;
BoundLabel m_targetLabel;
};
struct OpJlesseq : public Instruction {
static constexpr OpcodeID opcodeID = op_jlesseq;
static constexpr size_t length = 4;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister lhs, VirtualRegister rhs, BoundLabel targetLabel)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, lhs, rhs, targetLabel);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister lhs, VirtualRegister rhs, BoundLabel targetLabel)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, lhs, rhs, targetLabel);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister lhs, VirtualRegister rhs, BoundLabel targetLabel)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, lhs, rhs, targetLabel))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, lhs, rhs, targetLabel))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, lhs, rhs, targetLabel);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& lhs, VirtualRegister& rhs, BoundLabel& targetLabel)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<OpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(lhs)
&& Fits<VirtualRegister, __size>::check(rhs)
&& Fits<BoundLabel, __size>::check(targetLabel)
&& (__size == OpcodeSize::Wide16 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister lhs, VirtualRegister rhs, BoundLabel targetLabel)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, lhs, rhs, targetLabel)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide32));
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(lhs));
gen->write(Fits<VirtualRegister, __size>::convert(rhs));
gen->write(Fits<BoundLabel, __size>::convert(targetLabel));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**jlesseq"[2 - __sizeShiftAmount]);
dumper->dumpOperand("lhs", m_lhs, true);
dumper->dumpOperand("rhs", m_rhs, false);
dumper->dumpOperand("targetLabel", m_targetLabel, false);
}
OpJlesseq(const uint8_t* stream)
: m_lhs(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_rhs(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[1]))
, m_targetLabel(Fits<BoundLabel, OpcodeSize::Narrow>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpJlesseq(const uint16_t* stream)
: m_lhs(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_rhs(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[1]))
, m_targetLabel(Fits<BoundLabel, OpcodeSize::Wide16>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpJlesseq(const uint32_t* stream)
: m_lhs(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_rhs(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[1]))
, m_targetLabel(Fits<BoundLabel, OpcodeSize::Wide32>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static OpJlesseq decode(const uint8_t* stream)
{
// Hand the constructor a pointer to the first operand, skipping the opcode byte and, for wide encodings, the prefix byte.
if (*stream == op_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == op_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setLhs(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setLhs<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setLhs<OpcodeSize::Wide16>(value, func);
else
setLhs<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setLhs(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setRhs(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setRhs<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setRhs<OpcodeSize::Wide16>(value, func);
else
setRhs<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setRhs(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setTargetLabel(BoundLabel value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setTargetLabel<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setTargetLabel<OpcodeSize::Wide16>(value, func);
else
setTargetLabel<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setTargetLabel(BoundLabel value, Functor func)
{
if (!Fits<BoundLabel, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 2 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<BoundLabel, size>::convert(value);
}
VirtualRegister m_lhs;
VirtualRegister m_rhs;
BoundLabel m_targetLabel;
};
struct OpJgreater : public Instruction {
static constexpr OpcodeID opcodeID = op_jgreater;
static constexpr size_t length = 4;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister lhs, VirtualRegister rhs, BoundLabel targetLabel)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, lhs, rhs, targetLabel);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister lhs, VirtualRegister rhs, BoundLabel targetLabel)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, lhs, rhs, targetLabel);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister lhs, VirtualRegister rhs, BoundLabel targetLabel)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, lhs, rhs, targetLabel))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, lhs, rhs, targetLabel))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, lhs, rhs, targetLabel);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& lhs, VirtualRegister& rhs, BoundLabel& targetLabel)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<OpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(lhs)
&& Fits<VirtualRegister, __size>::check(rhs)
&& Fits<BoundLabel, __size>::check(targetLabel)
&& (__size == OpcodeSize::Wide16 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister lhs, VirtualRegister rhs, BoundLabel targetLabel)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, lhs, rhs, targetLabel)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide32));
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(lhs));
gen->write(Fits<VirtualRegister, __size>::convert(rhs));
gen->write(Fits<BoundLabel, __size>::convert(targetLabel));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**jgreater"[2 - __sizeShiftAmount]);
dumper->dumpOperand("lhs", m_lhs, true);
dumper->dumpOperand("rhs", m_rhs, false);
dumper->dumpOperand("targetLabel", m_targetLabel, false);
}
OpJgreater(const uint8_t* stream)
: m_lhs(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_rhs(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[1]))
, m_targetLabel(Fits<BoundLabel, OpcodeSize::Narrow>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpJgreater(const uint16_t* stream)
: m_lhs(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_rhs(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[1]))
, m_targetLabel(Fits<BoundLabel, OpcodeSize::Wide16>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpJgreater(const uint32_t* stream)
: m_lhs(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_rhs(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[1]))
, m_targetLabel(Fits<BoundLabel, OpcodeSize::Wide32>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static OpJgreater decode(const uint8_t* stream)
{
// Hand the constructor a pointer to the first operand, skipping the opcode byte and, for wide encodings, the prefix byte.
if (*stream == op_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == op_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setLhs(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setLhs<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setLhs<OpcodeSize::Wide16>(value, func);
else
setLhs<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setLhs(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setRhs(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setRhs<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setRhs<OpcodeSize::Wide16>(value, func);
else
setRhs<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setRhs(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setTargetLabel(BoundLabel value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setTargetLabel<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setTargetLabel<OpcodeSize::Wide16>(value, func);
else
setTargetLabel<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setTargetLabel(BoundLabel value, Functor func)
{
if (!Fits<BoundLabel, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 2 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<BoundLabel, size>::convert(value);
}
VirtualRegister m_lhs;
VirtualRegister m_rhs;
BoundLabel m_targetLabel;
};
struct OpJgreatereq : public Instruction {
static constexpr OpcodeID opcodeID = op_jgreatereq;
static constexpr size_t length = 4;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister lhs, VirtualRegister rhs, BoundLabel targetLabel)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, lhs, rhs, targetLabel);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister lhs, VirtualRegister rhs, BoundLabel targetLabel)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, lhs, rhs, targetLabel);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister lhs, VirtualRegister rhs, BoundLabel targetLabel)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, lhs, rhs, targetLabel))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, lhs, rhs, targetLabel))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, lhs, rhs, targetLabel);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& lhs, VirtualRegister& rhs, BoundLabel& targetLabel)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<OpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(lhs)
&& Fits<VirtualRegister, __size>::check(rhs)
&& Fits<BoundLabel, __size>::check(targetLabel)
&& (__size == OpcodeSize::Wide16 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister lhs, VirtualRegister rhs, BoundLabel targetLabel)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, lhs, rhs, targetLabel)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide32));
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(lhs));
gen->write(Fits<VirtualRegister, __size>::convert(rhs));
gen->write(Fits<BoundLabel, __size>::convert(targetLabel));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**jgreatereq"[2 - __sizeShiftAmount]);
dumper->dumpOperand("lhs", m_lhs, true);
dumper->dumpOperand("rhs", m_rhs, false);
dumper->dumpOperand("targetLabel", m_targetLabel, false);
}
OpJgreatereq(const uint8_t* stream)
: m_lhs(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_rhs(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[1]))
, m_targetLabel(Fits<BoundLabel, OpcodeSize::Narrow>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpJgreatereq(const uint16_t* stream)
: m_lhs(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_rhs(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[1]))
, m_targetLabel(Fits<BoundLabel, OpcodeSize::Wide16>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpJgreatereq(const uint32_t* stream)
: m_lhs(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_rhs(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[1]))
, m_targetLabel(Fits<BoundLabel, OpcodeSize::Wide32>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static OpJgreatereq decode(const uint8_t* stream)
{
// Hand the constructor a pointer to the first operand, skipping the opcode byte and, for wide encodings, the prefix byte.
if (*stream == op_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == op_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setLhs(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setLhs<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setLhs<OpcodeSize::Wide16>(value, func);
else
setLhs<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setLhs(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setRhs(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setRhs<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setRhs<OpcodeSize::Wide16>(value, func);
else
setRhs<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setRhs(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setTargetLabel(BoundLabel value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setTargetLabel<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setTargetLabel<OpcodeSize::Wide16>(value, func);
else
setTargetLabel<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setTargetLabel(BoundLabel value, Functor func)
{
if (!Fits<BoundLabel, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 2 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<BoundLabel, size>::convert(value);
}
VirtualRegister m_lhs;
VirtualRegister m_rhs;
BoundLabel m_targetLabel;
};
struct OpJnless : public Instruction {
static constexpr OpcodeID opcodeID = op_jnless;
static constexpr size_t length = 4;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister lhs, VirtualRegister rhs, BoundLabel targetLabel)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, lhs, rhs, targetLabel);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister lhs, VirtualRegister rhs, BoundLabel targetLabel)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, lhs, rhs, targetLabel);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister lhs, VirtualRegister rhs, BoundLabel targetLabel)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, lhs, rhs, targetLabel))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, lhs, rhs, targetLabel))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, lhs, rhs, targetLabel);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& lhs, VirtualRegister& rhs, BoundLabel& targetLabel)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<OpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(lhs)
&& Fits<VirtualRegister, __size>::check(rhs)
&& Fits<BoundLabel, __size>::check(targetLabel)
&& (__size == OpcodeSize::Wide16 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister lhs, VirtualRegister rhs, BoundLabel targetLabel)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, lhs, rhs, targetLabel)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide32));
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(lhs));
gen->write(Fits<VirtualRegister, __size>::convert(rhs));
gen->write(Fits<BoundLabel, __size>::convert(targetLabel));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**jnless"[2 - __sizeShiftAmount]);
dumper->dumpOperand("lhs", m_lhs, true);
dumper->dumpOperand("rhs", m_rhs, false);
dumper->dumpOperand("targetLabel", m_targetLabel, false);
}
OpJnless(const uint8_t* stream)
: m_lhs(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_rhs(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[1]))
, m_targetLabel(Fits<BoundLabel, OpcodeSize::Narrow>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpJnless(const uint16_t* stream)
: m_lhs(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_rhs(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[1]))
, m_targetLabel(Fits<BoundLabel, OpcodeSize::Wide16>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpJnless(const uint32_t* stream)
: m_lhs(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_rhs(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[1]))
, m_targetLabel(Fits<BoundLabel, OpcodeSize::Wide32>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static OpJnless decode(const uint8_t* stream)
{
// Hand the constructor a pointer to the first operand, skipping the opcode byte and, for wide encodings, the prefix byte.
if (*stream == op_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == op_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setLhs(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setLhs<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setLhs<OpcodeSize::Wide16>(value, func);
else
setLhs<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setLhs(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setRhs(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setRhs<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setRhs<OpcodeSize::Wide16>(value, func);
else
setRhs<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setRhs(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setTargetLabel(BoundLabel value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setTargetLabel<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setTargetLabel<OpcodeSize::Wide16>(value, func);
else
setTargetLabel<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setTargetLabel(BoundLabel value, Functor func)
{
if (!Fits<BoundLabel, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 2 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<BoundLabel, size>::convert(value);
}
VirtualRegister m_lhs;
VirtualRegister m_rhs;
BoundLabel m_targetLabel;
};
struct OpJnlesseq : public Instruction {
static constexpr OpcodeID opcodeID = op_jnlesseq;
static constexpr size_t length = 4;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister lhs, VirtualRegister rhs, BoundLabel targetLabel)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, lhs, rhs, targetLabel);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister lhs, VirtualRegister rhs, BoundLabel targetLabel)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, lhs, rhs, targetLabel);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister lhs, VirtualRegister rhs, BoundLabel targetLabel)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, lhs, rhs, targetLabel))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, lhs, rhs, targetLabel))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, lhs, rhs, targetLabel);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& lhs, VirtualRegister& rhs, BoundLabel& targetLabel)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<OpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(lhs)
&& Fits<VirtualRegister, __size>::check(rhs)
&& Fits<BoundLabel, __size>::check(targetLabel)
&& (__size == OpcodeSize::Wide16 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister lhs, VirtualRegister rhs, BoundLabel targetLabel)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, lhs, rhs, targetLabel)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide32));
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(lhs));
gen->write(Fits<VirtualRegister, __size>::convert(rhs));
gen->write(Fits<BoundLabel, __size>::convert(targetLabel));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**jnlesseq"[2 - __sizeShiftAmount]);
dumper->dumpOperand("lhs", m_lhs, true);
dumper->dumpOperand("rhs", m_rhs, false);
dumper->dumpOperand("targetLabel", m_targetLabel, false);
}
OpJnlesseq(const uint8_t* stream)
: m_lhs(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_rhs(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[1]))
, m_targetLabel(Fits<BoundLabel, OpcodeSize::Narrow>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpJnlesseq(const uint16_t* stream)
: m_lhs(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_rhs(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[1]))
, m_targetLabel(Fits<BoundLabel, OpcodeSize::Wide16>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpJnlesseq(const uint32_t* stream)
: m_lhs(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_rhs(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[1]))
, m_targetLabel(Fits<BoundLabel, OpcodeSize::Wide32>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static OpJnlesseq decode(const uint8_t* stream)
{
// Hand the constructor a pointer to the first operand, skipping the opcode byte and, for wide encodings, the prefix byte.
if (*stream == op_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == op_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setLhs(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setLhs<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setLhs<OpcodeSize::Wide16>(value, func);
else
setLhs<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setLhs(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setRhs(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setRhs<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setRhs<OpcodeSize::Wide16>(value, func);
else
setRhs<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setRhs(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setTargetLabel(BoundLabel value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setTargetLabel<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setTargetLabel<OpcodeSize::Wide16>(value, func);
else
setTargetLabel<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setTargetLabel(BoundLabel value, Functor func)
{
if (!Fits<BoundLabel, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 2 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<BoundLabel, size>::convert(value);
}
VirtualRegister m_lhs;
VirtualRegister m_rhs;
BoundLabel m_targetLabel;
};
struct OpJngreater : public Instruction {
static constexpr OpcodeID opcodeID = op_jngreater;
static constexpr size_t length = 4;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister lhs, VirtualRegister rhs, BoundLabel targetLabel)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, lhs, rhs, targetLabel);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister lhs, VirtualRegister rhs, BoundLabel targetLabel)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, lhs, rhs, targetLabel);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister lhs, VirtualRegister rhs, BoundLabel targetLabel)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, lhs, rhs, targetLabel))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, lhs, rhs, targetLabel))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, lhs, rhs, targetLabel);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& lhs, VirtualRegister& rhs, BoundLabel& targetLabel)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<OpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(lhs)
&& Fits<VirtualRegister, __size>::check(rhs)
&& Fits<BoundLabel, __size>::check(targetLabel)
&& (__size == OpcodeSize::Wide16 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister lhs, VirtualRegister rhs, BoundLabel targetLabel)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, lhs, rhs, targetLabel)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide32));
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(lhs));
gen->write(Fits<VirtualRegister, __size>::convert(rhs));
gen->write(Fits<BoundLabel, __size>::convert(targetLabel));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**jngreater"[2 - __sizeShiftAmount]);
dumper->dumpOperand("lhs", m_lhs, true);
dumper->dumpOperand("rhs", m_rhs, false);
dumper->dumpOperand("targetLabel", m_targetLabel, false);
}
OpJngreater(const uint8_t* stream)
: m_lhs(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_rhs(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[1]))
, m_targetLabel(Fits<BoundLabel, OpcodeSize::Narrow>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpJngreater(const uint16_t* stream)
: m_lhs(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_rhs(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[1]))
, m_targetLabel(Fits<BoundLabel, OpcodeSize::Wide16>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpJngreater(const uint32_t* stream)
: m_lhs(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_rhs(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[1]))
, m_targetLabel(Fits<BoundLabel, OpcodeSize::Wide32>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static OpJngreater decode(const uint8_t* stream)
{
// Hand the constructor a pointer to the first operand, skipping the opcode byte and, for wide encodings, the prefix byte.
if (*stream == op_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == op_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setLhs(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setLhs<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setLhs<OpcodeSize::Wide16>(value, func);
else
setLhs<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setLhs(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
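        // From the start of the instruction, skip the wide-prefix padding (PaddingBySize) and the one-byte opcode to reach operand 0.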
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setRhs(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setRhs<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setRhs<OpcodeSize::Wide16>(value, func);
else
setRhs<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setRhs(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setTargetLabel(BoundLabel value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setTargetLabel<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setTargetLabel<OpcodeSize::Wide16>(value, func);
else
setTargetLabel<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setTargetLabel(BoundLabel value, Functor func)
{
if (!Fits<BoundLabel, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 2 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<BoundLabel, size>::convert(value);
}
VirtualRegister m_lhs;
VirtualRegister m_rhs;
BoundLabel m_targetLabel;
};
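// op_jngreatereq: per its name, jumps to targetLabel when lhs >= rhs does not hold.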
struct OpJngreatereq : public Instruction {
static constexpr OpcodeID opcodeID = op_jngreatereq;
static constexpr size_t length = 4;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister lhs, VirtualRegister rhs, BoundLabel targetLabel)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, lhs, rhs, targetLabel);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister lhs, VirtualRegister rhs, BoundLabel targetLabel)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, lhs, rhs, targetLabel);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
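    // Tries the narrowest viable encoding first (narrow, then wide16, then wide32); the final wide32 attempt asserts, since every operand must fit at 32 bits.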
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister lhs, VirtualRegister rhs, BoundLabel targetLabel)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, lhs, rhs, targetLabel))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, lhs, rhs, targetLabel))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, lhs, rhs, targetLabel);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& lhs, VirtualRegister& rhs, BoundLabel& targetLabel)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<OpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(lhs)
&& Fits<VirtualRegister, __size>::check(rhs)
&& Fits<BoundLabel, __size>::check(targetLabel)
&& (__size == OpcodeSize::Wide16 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister lhs, VirtualRegister rhs, BoundLabel targetLabel)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, lhs, rhs, targetLabel)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide32));
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(lhs));
gen->write(Fits<VirtualRegister, __size>::convert(rhs));
gen->write(Fits<BoundLabel, __size>::convert(targetLabel));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**jngreatereq"[2 - __sizeShiftAmount]);
dumper->dumpOperand("lhs", m_lhs, true);
dumper->dumpOperand("rhs", m_rhs, false);
dumper->dumpOperand("targetLabel", m_targetLabel, false);
}
OpJngreatereq(const uint8_t* stream)
: m_lhs(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_rhs(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[1]))
, m_targetLabel(Fits<BoundLabel, OpcodeSize::Narrow>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpJngreatereq(const uint16_t* stream)
: m_lhs(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_rhs(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[1]))
, m_targetLabel(Fits<BoundLabel, OpcodeSize::Wide16>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpJngreatereq(const uint32_t* stream)
: m_lhs(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_rhs(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[1]))
, m_targetLabel(Fits<BoundLabel, OpcodeSize::Wide32>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static OpJngreatereq decode(const uint8_t* stream)
{
        // decode() skips the wide prefix (if any) and the opcode byte, handing the constructor a pointer to the first operand.
if (*stream == op_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == op_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setLhs(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setLhs<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setLhs<OpcodeSize::Wide16>(value, func);
else
setLhs<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setLhs(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setRhs(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setRhs<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setRhs<OpcodeSize::Wide16>(value, func);
else
setRhs<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setRhs(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setTargetLabel(BoundLabel value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setTargetLabel<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setTargetLabel<OpcodeSize::Wide16>(value, func);
else
setTargetLabel<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setTargetLabel(BoundLabel value, Functor func)
{
if (!Fits<BoundLabel, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 2 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<BoundLabel, size>::convert(value);
}
VirtualRegister m_lhs;
VirtualRegister m_rhs;
BoundLabel m_targetLabel;
};
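// op_jbelow: jumps to targetLabel when lhs is below rhs; "below" conventionally denotes an unsigned less-than.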
struct OpJbelow : public Instruction {
static constexpr OpcodeID opcodeID = op_jbelow;
static constexpr size_t length = 4;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister lhs, VirtualRegister rhs, BoundLabel targetLabel)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, lhs, rhs, targetLabel);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister lhs, VirtualRegister rhs, BoundLabel targetLabel)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, lhs, rhs, targetLabel);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister lhs, VirtualRegister rhs, BoundLabel targetLabel)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, lhs, rhs, targetLabel))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, lhs, rhs, targetLabel))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, lhs, rhs, targetLabel);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& lhs, VirtualRegister& rhs, BoundLabel& targetLabel)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<OpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(lhs)
&& Fits<VirtualRegister, __size>::check(rhs)
&& Fits<BoundLabel, __size>::check(targetLabel)
&& (__size == OpcodeSize::Wide16 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister lhs, VirtualRegister rhs, BoundLabel targetLabel)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, lhs, rhs, targetLabel)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide32));
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(lhs));
gen->write(Fits<VirtualRegister, __size>::convert(rhs));
gen->write(Fits<BoundLabel, __size>::convert(targetLabel));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**jbelow"[2 - __sizeShiftAmount]);
dumper->dumpOperand("lhs", m_lhs, true);
dumper->dumpOperand("rhs", m_rhs, false);
dumper->dumpOperand("targetLabel", m_targetLabel, false);
}
OpJbelow(const uint8_t* stream)
: m_lhs(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_rhs(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[1]))
, m_targetLabel(Fits<BoundLabel, OpcodeSize::Narrow>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpJbelow(const uint16_t* stream)
: m_lhs(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_rhs(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[1]))
, m_targetLabel(Fits<BoundLabel, OpcodeSize::Wide16>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpJbelow(const uint32_t* stream)
: m_lhs(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_rhs(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[1]))
, m_targetLabel(Fits<BoundLabel, OpcodeSize::Wide32>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static OpJbelow decode(const uint8_t* stream)
{
        // decode() skips the wide prefix (if any) and the opcode byte, handing the constructor a pointer to the first operand.
if (*stream == op_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == op_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setLhs(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setLhs<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setLhs<OpcodeSize::Wide16>(value, func);
else
setLhs<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setLhs(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setRhs(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setRhs<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setRhs<OpcodeSize::Wide16>(value, func);
else
setRhs<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setRhs(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setTargetLabel(BoundLabel value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setTargetLabel<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setTargetLabel<OpcodeSize::Wide16>(value, func);
else
setTargetLabel<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setTargetLabel(BoundLabel value, Functor func)
{
if (!Fits<BoundLabel, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 2 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<BoundLabel, size>::convert(value);
}
VirtualRegister m_lhs;
VirtualRegister m_rhs;
BoundLabel m_targetLabel;
};
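// op_jbeloweq: the below-or-equal counterpart of op_jbelow.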
struct OpJbeloweq : public Instruction {
static constexpr OpcodeID opcodeID = op_jbeloweq;
static constexpr size_t length = 4;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister lhs, VirtualRegister rhs, BoundLabel targetLabel)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, lhs, rhs, targetLabel);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister lhs, VirtualRegister rhs, BoundLabel targetLabel)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, lhs, rhs, targetLabel);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister lhs, VirtualRegister rhs, BoundLabel targetLabel)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, lhs, rhs, targetLabel))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, lhs, rhs, targetLabel))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, lhs, rhs, targetLabel);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& lhs, VirtualRegister& rhs, BoundLabel& targetLabel)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<OpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(lhs)
&& Fits<VirtualRegister, __size>::check(rhs)
&& Fits<BoundLabel, __size>::check(targetLabel)
&& (__size == OpcodeSize::Wide16 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister lhs, VirtualRegister rhs, BoundLabel targetLabel)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, lhs, rhs, targetLabel)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide32));
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(lhs));
gen->write(Fits<VirtualRegister, __size>::convert(rhs));
gen->write(Fits<BoundLabel, __size>::convert(targetLabel));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**jbeloweq"[2 - __sizeShiftAmount]);
dumper->dumpOperand("lhs", m_lhs, true);
dumper->dumpOperand("rhs", m_rhs, false);
dumper->dumpOperand("targetLabel", m_targetLabel, false);
}
OpJbeloweq(const uint8_t* stream)
: m_lhs(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_rhs(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[1]))
, m_targetLabel(Fits<BoundLabel, OpcodeSize::Narrow>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpJbeloweq(const uint16_t* stream)
: m_lhs(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_rhs(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[1]))
, m_targetLabel(Fits<BoundLabel, OpcodeSize::Wide16>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpJbeloweq(const uint32_t* stream)
: m_lhs(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_rhs(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[1]))
, m_targetLabel(Fits<BoundLabel, OpcodeSize::Wide32>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static OpJbeloweq decode(const uint8_t* stream)
{
        // decode() skips the wide prefix (if any) and the opcode byte, handing the constructor a pointer to the first operand.
if (*stream == op_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == op_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setLhs(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setLhs<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setLhs<OpcodeSize::Wide16>(value, func);
else
setLhs<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setLhs(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setRhs(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setRhs<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setRhs<OpcodeSize::Wide16>(value, func);
else
setRhs<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setRhs(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setTargetLabel(BoundLabel value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setTargetLabel<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setTargetLabel<OpcodeSize::Wide16>(value, func);
else
setTargetLabel<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setTargetLabel(BoundLabel value, Functor func)
{
if (!Fits<BoundLabel, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 2 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<BoundLabel, size>::convert(value);
}
VirtualRegister m_lhs;
VirtualRegister m_rhs;
BoundLabel m_targetLabel;
};
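// op_loop_hint: zero operands; emitted at loop back-edges, giving the VM a point to poll for tier-up and interrupts (inferred from its name and placement).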
struct OpLoopHint : public Instruction {
static constexpr OpcodeID opcodeID = op_loop_hint;
static constexpr size_t length = 1;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<OpcodeID, __size>::check(opcodeID)
&& (__size == OpcodeSize::Wide16 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide32));
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**loop_hint"[2 - __sizeShiftAmount]);
}
OpLoopHint(const uint8_t* stream)
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpLoopHint(const uint16_t* stream)
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpLoopHint(const uint32_t* stream)
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static OpLoopHint decode(const uint8_t* stream)
{
        // decode() skips the wide prefix (if any) and the opcode byte, handing the constructor a pointer to the first operand.
if (*stream == op_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == op_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
};
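// op_switch_imm: table-driven switch on an integer scrutinee; tableIndex names the jump table and defaultOffset is the fallback target.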
struct OpSwitchImm : public Instruction {
static constexpr OpcodeID opcodeID = op_switch_imm;
static constexpr size_t length = 4;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, unsigned tableIndex, BoundLabel defaultOffset, VirtualRegister scrutinee)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, tableIndex, defaultOffset, scrutinee);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, unsigned tableIndex, BoundLabel defaultOffset, VirtualRegister scrutinee)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, tableIndex, defaultOffset, scrutinee);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, unsigned tableIndex, BoundLabel defaultOffset, VirtualRegister scrutinee)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, tableIndex, defaultOffset, scrutinee))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, tableIndex, defaultOffset, scrutinee))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, tableIndex, defaultOffset, scrutinee);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, unsigned& tableIndex, BoundLabel& defaultOffset, VirtualRegister& scrutinee)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<OpcodeID, __size>::check(opcodeID)
&& Fits<unsigned, __size>::check(tableIndex)
&& Fits<BoundLabel, __size>::check(defaultOffset)
&& Fits<VirtualRegister, __size>::check(scrutinee)
&& (__size == OpcodeSize::Wide16 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, unsigned tableIndex, BoundLabel defaultOffset, VirtualRegister scrutinee)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, tableIndex, defaultOffset, scrutinee)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide32));
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<unsigned, __size>::convert(tableIndex));
gen->write(Fits<BoundLabel, __size>::convert(defaultOffset));
gen->write(Fits<VirtualRegister, __size>::convert(scrutinee));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**switch_imm"[2 - __sizeShiftAmount]);
dumper->dumpOperand("tableIndex", m_tableIndex, true);
dumper->dumpOperand("defaultOffset", m_defaultOffset, false);
dumper->dumpOperand("scrutinee", m_scrutinee, false);
}
OpSwitchImm(const uint8_t* stream)
: m_tableIndex(Fits<unsigned, OpcodeSize::Narrow>::convert(stream[0]))
, m_defaultOffset(Fits<BoundLabel, OpcodeSize::Narrow>::convert(stream[1]))
, m_scrutinee(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpSwitchImm(const uint16_t* stream)
: m_tableIndex(Fits<unsigned, OpcodeSize::Wide16>::convert(stream[0]))
, m_defaultOffset(Fits<BoundLabel, OpcodeSize::Wide16>::convert(stream[1]))
, m_scrutinee(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpSwitchImm(const uint32_t* stream)
: m_tableIndex(Fits<unsigned, OpcodeSize::Wide32>::convert(stream[0]))
, m_defaultOffset(Fits<BoundLabel, OpcodeSize::Wide32>::convert(stream[1]))
, m_scrutinee(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static OpSwitchImm decode(const uint8_t* stream)
{
        // decode() skips the wide prefix (if any) and the opcode byte, handing the constructor a pointer to the first operand.
if (*stream == op_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == op_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setTableIndex(unsigned value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setTableIndex<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setTableIndex<OpcodeSize::Wide16>(value, func);
else
setTableIndex<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setTableIndex(unsigned value, Functor func)
{
if (!Fits<unsigned, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<unsigned, size>::convert(value);
}
template<typename Functor>
void setDefaultOffset(BoundLabel value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setDefaultOffset<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setDefaultOffset<OpcodeSize::Wide16>(value, func);
else
setDefaultOffset<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setDefaultOffset(BoundLabel value, Functor func)
{
if (!Fits<BoundLabel, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<BoundLabel, size>::convert(value);
}
template<typename Functor>
void setScrutinee(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setScrutinee<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setScrutinee<OpcodeSize::Wide16>(value, func);
else
setScrutinee<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setScrutinee(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 2 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
unsigned m_tableIndex;
BoundLabel m_defaultOffset;
VirtualRegister m_scrutinee;
};
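// op_switch_char: like op_switch_imm, but the scrutinee is expected to be a single-character string.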
struct OpSwitchChar : public Instruction {
static constexpr OpcodeID opcodeID = op_switch_char;
static constexpr size_t length = 4;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, unsigned tableIndex, BoundLabel defaultOffset, VirtualRegister scrutinee)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, tableIndex, defaultOffset, scrutinee);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, unsigned tableIndex, BoundLabel defaultOffset, VirtualRegister scrutinee)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, tableIndex, defaultOffset, scrutinee);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, unsigned tableIndex, BoundLabel defaultOffset, VirtualRegister scrutinee)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, tableIndex, defaultOffset, scrutinee))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, tableIndex, defaultOffset, scrutinee))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, tableIndex, defaultOffset, scrutinee);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, unsigned& tableIndex, BoundLabel& defaultOffset, VirtualRegister& scrutinee)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<OpcodeID, __size>::check(opcodeID)
&& Fits<unsigned, __size>::check(tableIndex)
&& Fits<BoundLabel, __size>::check(defaultOffset)
&& Fits<VirtualRegister, __size>::check(scrutinee)
&& (__size == OpcodeSize::Wide16 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, unsigned tableIndex, BoundLabel defaultOffset, VirtualRegister scrutinee)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, tableIndex, defaultOffset, scrutinee)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide32));
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<unsigned, __size>::convert(tableIndex));
gen->write(Fits<BoundLabel, __size>::convert(defaultOffset));
gen->write(Fits<VirtualRegister, __size>::convert(scrutinee));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**switch_char"[2 - __sizeShiftAmount]);
dumper->dumpOperand("tableIndex", m_tableIndex, true);
dumper->dumpOperand("defaultOffset", m_defaultOffset, false);
dumper->dumpOperand("scrutinee", m_scrutinee, false);
}
OpSwitchChar(const uint8_t* stream)
: m_tableIndex(Fits<unsigned, OpcodeSize::Narrow>::convert(stream[0]))
, m_defaultOffset(Fits<BoundLabel, OpcodeSize::Narrow>::convert(stream[1]))
, m_scrutinee(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpSwitchChar(const uint16_t* stream)
: m_tableIndex(Fits<unsigned, OpcodeSize::Wide16>::convert(stream[0]))
, m_defaultOffset(Fits<BoundLabel, OpcodeSize::Wide16>::convert(stream[1]))
, m_scrutinee(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpSwitchChar(const uint32_t* stream)
: m_tableIndex(Fits<unsigned, OpcodeSize::Wide32>::convert(stream[0]))
, m_defaultOffset(Fits<BoundLabel, OpcodeSize::Wide32>::convert(stream[1]))
, m_scrutinee(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static OpSwitchChar decode(const uint8_t* stream)
{
        // decode() skips the wide prefix (if any) and the opcode byte, handing the constructor a pointer to the first operand.
if (*stream == op_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == op_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setTableIndex(unsigned value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setTableIndex<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setTableIndex<OpcodeSize::Wide16>(value, func);
else
setTableIndex<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setTableIndex(unsigned value, Functor func)
{
if (!Fits<unsigned, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<unsigned, size>::convert(value);
}
template<typename Functor>
void setDefaultOffset(BoundLabel value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setDefaultOffset<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setDefaultOffset<OpcodeSize::Wide16>(value, func);
else
setDefaultOffset<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setDefaultOffset(BoundLabel value, Functor func)
{
if (!Fits<BoundLabel, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<BoundLabel, size>::convert(value);
}
template<typename Functor>
void setScrutinee(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setScrutinee<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setScrutinee<OpcodeSize::Wide16>(value, func);
else
setScrutinee<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setScrutinee(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 2 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
unsigned m_tableIndex;
BoundLabel m_defaultOffset;
VirtualRegister m_scrutinee;
};
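// op_switch_string: like op_switch_imm, but keyed on string values.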
struct OpSwitchString : public Instruction {
static constexpr OpcodeID opcodeID = op_switch_string;
static constexpr size_t length = 4;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, unsigned tableIndex, BoundLabel defaultOffset, VirtualRegister scrutinee)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, tableIndex, defaultOffset, scrutinee);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, unsigned tableIndex, BoundLabel defaultOffset, VirtualRegister scrutinee)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, tableIndex, defaultOffset, scrutinee);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, unsigned tableIndex, BoundLabel defaultOffset, VirtualRegister scrutinee)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, tableIndex, defaultOffset, scrutinee))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, tableIndex, defaultOffset, scrutinee))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, tableIndex, defaultOffset, scrutinee);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, unsigned& tableIndex, BoundLabel& defaultOffset, VirtualRegister& scrutinee)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<OpcodeID, __size>::check(opcodeID)
&& Fits<unsigned, __size>::check(tableIndex)
&& Fits<BoundLabel, __size>::check(defaultOffset)
&& Fits<VirtualRegister, __size>::check(scrutinee)
&& (__size == OpcodeSize::Wide16 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, unsigned tableIndex, BoundLabel defaultOffset, VirtualRegister scrutinee)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, tableIndex, defaultOffset, scrutinee)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide32));
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<unsigned, __size>::convert(tableIndex));
gen->write(Fits<BoundLabel, __size>::convert(defaultOffset));
gen->write(Fits<VirtualRegister, __size>::convert(scrutinee));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**switch_string"[2 - __sizeShiftAmount]);
dumper->dumpOperand("tableIndex", m_tableIndex, true);
dumper->dumpOperand("defaultOffset", m_defaultOffset, false);
dumper->dumpOperand("scrutinee", m_scrutinee, false);
}
OpSwitchString(const uint8_t* stream)
: m_tableIndex(Fits<unsigned, OpcodeSize::Narrow>::convert(stream[0]))
, m_defaultOffset(Fits<BoundLabel, OpcodeSize::Narrow>::convert(stream[1]))
, m_scrutinee(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpSwitchString(const uint16_t* stream)
: m_tableIndex(Fits<unsigned, OpcodeSize::Wide16>::convert(stream[0]))
, m_defaultOffset(Fits<BoundLabel, OpcodeSize::Wide16>::convert(stream[1]))
, m_scrutinee(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpSwitchString(const uint32_t* stream)
: m_tableIndex(Fits<unsigned, OpcodeSize::Wide32>::convert(stream[0]))
, m_defaultOffset(Fits<BoundLabel, OpcodeSize::Wide32>::convert(stream[1]))
, m_scrutinee(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static OpSwitchString decode(const uint8_t* stream)
{
        // decode() skips the wide prefix (if any) and the opcode byte, handing the constructor a pointer to the first operand.
if (*stream == op_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == op_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setTableIndex(unsigned value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setTableIndex<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setTableIndex<OpcodeSize::Wide16>(value, func);
else
setTableIndex<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setTableIndex(unsigned value, Functor func)
{
if (!Fits<unsigned, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<unsigned, size>::convert(value);
}
template<typename Functor>
void setDefaultOffset(BoundLabel value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setDefaultOffset<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setDefaultOffset<OpcodeSize::Wide16>(value, func);
else
setDefaultOffset<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setDefaultOffset(BoundLabel value, Functor func)
{
if (!Fits<BoundLabel, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<BoundLabel, size>::convert(value);
}
template<typename Functor>
void setScrutinee(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setScrutinee<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setScrutinee<OpcodeSize::Wide16>(value, func);
else
setScrutinee<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setScrutinee(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 2 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
unsigned m_tableIndex;
BoundLabel m_defaultOffset;
VirtualRegister m_scrutinee;
};
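// op_new_func: materializes the functionDecl-th function declaration of the code block into dst, capturing scope.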
struct OpNewFunc : public Instruction {
static constexpr OpcodeID opcodeID = op_new_func;
static constexpr size_t length = 4;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister scope, unsigned functionDecl)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, dst, scope, functionDecl);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister scope, unsigned functionDecl)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, dst, scope, functionDecl);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister scope, unsigned functionDecl)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, dst, scope, functionDecl))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, dst, scope, functionDecl))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, dst, scope, functionDecl);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& dst, VirtualRegister& scope, unsigned& functionDecl)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<OpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(dst)
&& Fits<VirtualRegister, __size>::check(scope)
&& Fits<unsigned, __size>::check(functionDecl)
&& (__size == OpcodeSize::Wide16 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister scope, unsigned functionDecl)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, dst, scope, functionDecl)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide32));
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(dst));
gen->write(Fits<VirtualRegister, __size>::convert(scope));
gen->write(Fits<unsigned, __size>::convert(functionDecl));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**new_func"[2 - __sizeShiftAmount]);
dumper->dumpOperand("dst", m_dst, true);
dumper->dumpOperand("scope", m_scope, false);
dumper->dumpOperand("functionDecl", m_functionDecl, false);
}
OpNewFunc(const uint8_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_scope(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[1]))
, m_functionDecl(Fits<unsigned, OpcodeSize::Narrow>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpNewFunc(const uint16_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_scope(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[1]))
, m_functionDecl(Fits<unsigned, OpcodeSize::Wide16>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpNewFunc(const uint32_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_scope(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[1]))
, m_functionDecl(Fits<unsigned, OpcodeSize::Wide32>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static OpNewFunc decode(const uint8_t* stream)
{
        // decode() skips the wide prefix (if any) and the opcode byte, handing the constructor a pointer to the first operand.
if (*stream == op_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == op_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setDst<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setDst<OpcodeSize::Wide16>(value, func);
else
setDst<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setScope(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setScope<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setScope<OpcodeSize::Wide16>(value, func);
else
setScope<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setScope(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setFunctionDecl(unsigned value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setFunctionDecl<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setFunctionDecl<OpcodeSize::Wide16>(value, func);
else
setFunctionDecl<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setFunctionDecl(unsigned value, Functor func)
{
if (!Fits<unsigned, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 2 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<unsigned, size>::convert(value);
}
VirtualRegister m_dst;
VirtualRegister m_scope;
unsigned m_functionDecl;
};
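// op_new_func_exp: as op_new_func, but for a function expression.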
struct OpNewFuncExp : public Instruction {
static constexpr OpcodeID opcodeID = op_new_func_exp;
static constexpr size_t length = 4;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister scope, unsigned functionDecl)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, dst, scope, functionDecl);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister scope, unsigned functionDecl)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, dst, scope, functionDecl);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister scope, unsigned functionDecl)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, dst, scope, functionDecl))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, dst, scope, functionDecl))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, dst, scope, functionDecl);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& dst, VirtualRegister& scope, unsigned& functionDecl)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<OpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(dst)
&& Fits<VirtualRegister, __size>::check(scope)
&& Fits<unsigned, __size>::check(functionDecl)
&& (__size == OpcodeSize::Wide16 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister scope, unsigned functionDecl)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, dst, scope, functionDecl)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide32));
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(dst));
gen->write(Fits<VirtualRegister, __size>::convert(scope));
gen->write(Fits<unsigned, __size>::convert(functionDecl));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**new_func_exp"[2 - __sizeShiftAmount]);
dumper->dumpOperand("dst", m_dst, true);
dumper->dumpOperand("scope", m_scope, false);
dumper->dumpOperand("functionDecl", m_functionDecl, false);
}
OpNewFuncExp(const uint8_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_scope(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[1]))
, m_functionDecl(Fits<unsigned, OpcodeSize::Narrow>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpNewFuncExp(const uint16_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_scope(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[1]))
, m_functionDecl(Fits<unsigned, OpcodeSize::Wide16>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpNewFuncExp(const uint32_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_scope(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[1]))
, m_functionDecl(Fits<unsigned, OpcodeSize::Wide32>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static OpNewFuncExp decode(const uint8_t* stream)
{
// `stream` points at the wide prefix (if any) or the opcode byte; the
// constructors expect a pointer to the first operand, so those bytes are skipped.
if (*stream == op_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == op_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setDst<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setDst<OpcodeSize::Wide16>(value, func);
else
setDst<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setScope(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setScope<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setScope<OpcodeSize::Wide16>(value, func);
else
setScope<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setScope(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setFunctionDecl(unsigned value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setFunctionDecl<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setFunctionDecl<OpcodeSize::Wide16>(value, func);
else
setFunctionDecl<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setFunctionDecl(unsigned value, Functor func)
{
if (!Fits<unsigned, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 2 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<unsigned, size>::convert(value);
}
VirtualRegister m_dst;
VirtualRegister m_scope;
unsigned m_functionDecl;
};
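// new_generator_func dst, scope, functionDecl
// Same operand layout as new_func_exp; creates the function object for a
// generator function declaration. (Informal summary; see BytecodeList.rb.)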
struct OpNewGeneratorFunc : public Instruction {
static constexpr OpcodeID opcodeID = op_new_generator_func;
static constexpr size_t length = 4;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister scope, unsigned functionDecl)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, dst, scope, functionDecl);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister scope, unsigned functionDecl)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, dst, scope, functionDecl);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister scope, unsigned functionDecl)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, dst, scope, functionDecl))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, dst, scope, functionDecl))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, dst, scope, functionDecl);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& dst, VirtualRegister& scope, unsigned& functionDecl)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<OpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(dst)
&& Fits<VirtualRegister, __size>::check(scope)
&& Fits<unsigned, __size>::check(functionDecl)
&& (__size == OpcodeSize::Wide16 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister scope, unsigned functionDecl)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, dst, scope, functionDecl)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide32));
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(dst));
gen->write(Fits<VirtualRegister, __size>::convert(scope));
gen->write(Fits<unsigned, __size>::convert(functionDecl));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**new_generator_func"[2 - __sizeShiftAmount]);
dumper->dumpOperand("dst", m_dst, true);
dumper->dumpOperand("scope", m_scope, false);
dumper->dumpOperand("functionDecl", m_functionDecl, false);
}
OpNewGeneratorFunc(const uint8_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_scope(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[1]))
, m_functionDecl(Fits<unsigned, OpcodeSize::Narrow>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpNewGeneratorFunc(const uint16_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_scope(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[1]))
, m_functionDecl(Fits<unsigned, OpcodeSize::Wide16>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpNewGeneratorFunc(const uint32_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_scope(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[1]))
, m_functionDecl(Fits<unsigned, OpcodeSize::Wide32>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static OpNewGeneratorFunc decode(const uint8_t* stream)
{
// `stream` points at the wide prefix (if any) or the opcode byte; the
// constructors expect a pointer to the first operand, so those bytes are skipped.
if (*stream == op_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == op_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setDst<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setDst<OpcodeSize::Wide16>(value, func);
else
setDst<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setScope(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setScope<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setScope<OpcodeSize::Wide16>(value, func);
else
setScope<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setScope(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setFunctionDecl(unsigned value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setFunctionDecl<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setFunctionDecl<OpcodeSize::Wide16>(value, func);
else
setFunctionDecl<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setFunctionDecl(unsigned value, Functor func)
{
if (!Fits<unsigned, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 2 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<unsigned, size>::convert(value);
}
VirtualRegister m_dst;
VirtualRegister m_scope;
unsigned m_functionDecl;
};
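// new_generator_func_exp dst, scope, functionDecl
// Creates the function object for a generator function expression. (Informal
// summary; see BytecodeList.rb.)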
struct OpNewGeneratorFuncExp : public Instruction {
static constexpr OpcodeID opcodeID = op_new_generator_func_exp;
static constexpr size_t length = 4;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister scope, unsigned functionDecl)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, dst, scope, functionDecl);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister scope, unsigned functionDecl)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, dst, scope, functionDecl);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister scope, unsigned functionDecl)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, dst, scope, functionDecl))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, dst, scope, functionDecl))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, dst, scope, functionDecl);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& dst, VirtualRegister& scope, unsigned& functionDecl)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<OpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(dst)
&& Fits<VirtualRegister, __size>::check(scope)
&& Fits<unsigned, __size>::check(functionDecl)
&& (__size == OpcodeSize::Wide16 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister scope, unsigned functionDecl)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, dst, scope, functionDecl)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide32));
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(dst));
gen->write(Fits<VirtualRegister, __size>::convert(scope));
gen->write(Fits<unsigned, __size>::convert(functionDecl));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**new_generator_func_exp"[2 - __sizeShiftAmount]);
dumper->dumpOperand("dst", m_dst, true);
dumper->dumpOperand("scope", m_scope, false);
dumper->dumpOperand("functionDecl", m_functionDecl, false);
}
OpNewGeneratorFuncExp(const uint8_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_scope(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[1]))
, m_functionDecl(Fits<unsigned, OpcodeSize::Narrow>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpNewGeneratorFuncExp(const uint16_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_scope(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[1]))
, m_functionDecl(Fits<unsigned, OpcodeSize::Wide16>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpNewGeneratorFuncExp(const uint32_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_scope(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[1]))
, m_functionDecl(Fits<unsigned, OpcodeSize::Wide32>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static OpNewGeneratorFuncExp decode(const uint8_t* stream)
{
// `stream` points at the wide prefix (if any) or the opcode byte; the
// constructors expect a pointer to the first operand, so those bytes are skipped.
if (*stream == op_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == op_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setDst<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setDst<OpcodeSize::Wide16>(value, func);
else
setDst<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setScope(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setScope<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setScope<OpcodeSize::Wide16>(value, func);
else
setScope<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setScope(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setFunctionDecl(unsigned value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setFunctionDecl<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setFunctionDecl<OpcodeSize::Wide16>(value, func);
else
setFunctionDecl<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setFunctionDecl(unsigned value, Functor func)
{
if (!Fits<unsigned, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 2 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<unsigned, size>::convert(value);
}
VirtualRegister m_dst;
VirtualRegister m_scope;
unsigned m_functionDecl;
};
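// new_async_func dst, scope, functionDecl
// Creates the function object for an async function declaration. (Informal
// summary; see BytecodeList.rb.)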
struct OpNewAsyncFunc : public Instruction {
static constexpr OpcodeID opcodeID = op_new_async_func;
static constexpr size_t length = 4;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister scope, unsigned functionDecl)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, dst, scope, functionDecl);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister scope, unsigned functionDecl)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, dst, scope, functionDecl);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister scope, unsigned functionDecl)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, dst, scope, functionDecl))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, dst, scope, functionDecl))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, dst, scope, functionDecl);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& dst, VirtualRegister& scope, unsigned& functionDecl)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<OpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(dst)
&& Fits<VirtualRegister, __size>::check(scope)
&& Fits<unsigned, __size>::check(functionDecl)
&& (__size == OpcodeSize::Wide16 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister scope, unsigned functionDecl)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, dst, scope, functionDecl)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide32));
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(dst));
gen->write(Fits<VirtualRegister, __size>::convert(scope));
gen->write(Fits<unsigned, __size>::convert(functionDecl));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**new_async_func"[2 - __sizeShiftAmount]);
dumper->dumpOperand("dst", m_dst, true);
dumper->dumpOperand("scope", m_scope, false);
dumper->dumpOperand("functionDecl", m_functionDecl, false);
}
OpNewAsyncFunc(const uint8_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_scope(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[1]))
, m_functionDecl(Fits<unsigned, OpcodeSize::Narrow>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpNewAsyncFunc(const uint16_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_scope(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[1]))
, m_functionDecl(Fits<unsigned, OpcodeSize::Wide16>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpNewAsyncFunc(const uint32_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_scope(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[1]))
, m_functionDecl(Fits<unsigned, OpcodeSize::Wide32>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static OpNewAsyncFunc decode(const uint8_t* stream)
{
// `stream` points at the wide prefix (if any) or the opcode byte; the
// constructors expect a pointer to the first operand, so those bytes are skipped.
if (*stream == op_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == op_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setDst<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setDst<OpcodeSize::Wide16>(value, func);
else
setDst<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setScope(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setScope<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setScope<OpcodeSize::Wide16>(value, func);
else
setScope<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setScope(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setFunctionDecl(unsigned value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setFunctionDecl<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setFunctionDecl<OpcodeSize::Wide16>(value, func);
else
setFunctionDecl<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setFunctionDecl(unsigned value, Functor func)
{
if (!Fits<unsigned, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 2 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<unsigned, size>::convert(value);
}
VirtualRegister m_dst;
VirtualRegister m_scope;
unsigned m_functionDecl;
};
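// new_async_func_exp dst, scope, functionDecl
// Creates the function object for an async function expression. (Informal
// summary; see BytecodeList.rb.)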
struct OpNewAsyncFuncExp : public Instruction {
static constexpr OpcodeID opcodeID = op_new_async_func_exp;
static constexpr size_t length = 4;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister scope, unsigned functionDecl)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, dst, scope, functionDecl);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister scope, unsigned functionDecl)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, dst, scope, functionDecl);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister scope, unsigned functionDecl)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, dst, scope, functionDecl))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, dst, scope, functionDecl))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, dst, scope, functionDecl);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& dst, VirtualRegister& scope, unsigned& functionDecl)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<OpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(dst)
&& Fits<VirtualRegister, __size>::check(scope)
&& Fits<unsigned, __size>::check(functionDecl)
&& (__size == OpcodeSize::Wide16 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister scope, unsigned functionDecl)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, dst, scope, functionDecl)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide32));
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(dst));
gen->write(Fits<VirtualRegister, __size>::convert(scope));
gen->write(Fits<unsigned, __size>::convert(functionDecl));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**new_async_func_exp"[2 - __sizeShiftAmount]);
dumper->dumpOperand("dst", m_dst, true);
dumper->dumpOperand("scope", m_scope, false);
dumper->dumpOperand("functionDecl", m_functionDecl, false);
}
OpNewAsyncFuncExp(const uint8_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_scope(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[1]))
, m_functionDecl(Fits<unsigned, OpcodeSize::Narrow>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpNewAsyncFuncExp(const uint16_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_scope(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[1]))
, m_functionDecl(Fits<unsigned, OpcodeSize::Wide16>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpNewAsyncFuncExp(const uint32_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_scope(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[1]))
, m_functionDecl(Fits<unsigned, OpcodeSize::Wide32>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static OpNewAsyncFuncExp decode(const uint8_t* stream)
{
// `stream` points at the wide prefix (if any) or the opcode byte; the
// constructors expect a pointer to the first operand, so those bytes are skipped.
if (*stream == op_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == op_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setDst<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setDst<OpcodeSize::Wide16>(value, func);
else
setDst<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setScope(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setScope<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setScope<OpcodeSize::Wide16>(value, func);
else
setScope<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setScope(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setFunctionDecl(unsigned value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setFunctionDecl<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setFunctionDecl<OpcodeSize::Wide16>(value, func);
else
setFunctionDecl<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setFunctionDecl(unsigned value, Functor func)
{
if (!Fits<unsigned, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 2 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<unsigned, size>::convert(value);
}
VirtualRegister m_dst;
VirtualRegister m_scope;
unsigned m_functionDecl;
};
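// new_async_generator_func dst, scope, functionDecl
// Creates the function object for an async generator function declaration.
// (Informal summary; see BytecodeList.rb.)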
struct OpNewAsyncGeneratorFunc : public Instruction {
static constexpr OpcodeID opcodeID = op_new_async_generator_func;
static constexpr size_t length = 4;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister scope, unsigned functionDecl)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, dst, scope, functionDecl);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister scope, unsigned functionDecl)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, dst, scope, functionDecl);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister scope, unsigned functionDecl)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, dst, scope, functionDecl))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, dst, scope, functionDecl))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, dst, scope, functionDecl);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& dst, VirtualRegister& scope, unsigned& functionDecl)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<OpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(dst)
&& Fits<VirtualRegister, __size>::check(scope)
&& Fits<unsigned, __size>::check(functionDecl)
&& (__size == OpcodeSize::Wide16 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister scope, unsigned functionDecl)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, dst, scope, functionDecl)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide32));
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(dst));
gen->write(Fits<VirtualRegister, __size>::convert(scope));
gen->write(Fits<unsigned, __size>::convert(functionDecl));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**new_async_generator_func"[2 - __sizeShiftAmount]);
dumper->dumpOperand("dst", m_dst, true);
dumper->dumpOperand("scope", m_scope, false);
dumper->dumpOperand("functionDecl", m_functionDecl, false);
}
OpNewAsyncGeneratorFunc(const uint8_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_scope(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[1]))
, m_functionDecl(Fits<unsigned, OpcodeSize::Narrow>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpNewAsyncGeneratorFunc(const uint16_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_scope(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[1]))
, m_functionDecl(Fits<unsigned, OpcodeSize::Wide16>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpNewAsyncGeneratorFunc(const uint32_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_scope(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[1]))
, m_functionDecl(Fits<unsigned, OpcodeSize::Wide32>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static OpNewAsyncGeneratorFunc decode(const uint8_t* stream)
{
// `stream` points at the wide prefix (if any) or the opcode byte; the
// constructors expect a pointer to the first operand, so those bytes are skipped.
if (*stream == op_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == op_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setDst<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setDst<OpcodeSize::Wide16>(value, func);
else
setDst<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setScope(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setScope<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setScope<OpcodeSize::Wide16>(value, func);
else
setScope<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setScope(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setFunctionDecl(unsigned value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setFunctionDecl<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setFunctionDecl<OpcodeSize::Wide16>(value, func);
else
setFunctionDecl<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setFunctionDecl(unsigned value, Functor func)
{
if (!Fits<unsigned, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 2 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<unsigned, size>::convert(value);
}
VirtualRegister m_dst;
VirtualRegister m_scope;
unsigned m_functionDecl;
};
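// new_async_generator_func_exp dst, scope, functionDecl
// Creates the function object for an async generator function expression.
// (Informal summary; see BytecodeList.rb.)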
struct OpNewAsyncGeneratorFuncExp : public Instruction {
static constexpr OpcodeID opcodeID = op_new_async_generator_func_exp;
static constexpr size_t length = 4;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister scope, unsigned functionDecl)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, dst, scope, functionDecl);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister scope, unsigned functionDecl)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, dst, scope, functionDecl);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister scope, unsigned functionDecl)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, dst, scope, functionDecl))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, dst, scope, functionDecl))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, dst, scope, functionDecl);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& dst, VirtualRegister& scope, unsigned& functionDecl)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<OpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(dst)
&& Fits<VirtualRegister, __size>::check(scope)
&& Fits<unsigned, __size>::check(functionDecl)
&& (__size == OpcodeSize::Wide16 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister scope, unsigned functionDecl)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, dst, scope, functionDecl)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide32));
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(dst));
gen->write(Fits<VirtualRegister, __size>::convert(scope));
gen->write(Fits<unsigned, __size>::convert(functionDecl));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**new_async_generator_func_exp"[2 - __sizeShiftAmount]);
dumper->dumpOperand("dst", m_dst, true);
dumper->dumpOperand("scope", m_scope, false);
dumper->dumpOperand("functionDecl", m_functionDecl, false);
}
OpNewAsyncGeneratorFuncExp(const uint8_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_scope(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[1]))
, m_functionDecl(Fits<unsigned, OpcodeSize::Narrow>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpNewAsyncGeneratorFuncExp(const uint16_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_scope(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[1]))
, m_functionDecl(Fits<unsigned, OpcodeSize::Wide16>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpNewAsyncGeneratorFuncExp(const uint32_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_scope(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[1]))
, m_functionDecl(Fits<unsigned, OpcodeSize::Wide32>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static OpNewAsyncGeneratorFuncExp decode(const uint8_t* stream)
{
// `stream` points at the wide prefix (if any) or the opcode byte; the
// constructors expect a pointer to the first operand, so those bytes are skipped.
if (*stream == op_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == op_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setDst<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setDst<OpcodeSize::Wide16>(value, func);
else
setDst<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setScope(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setScope<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setScope<OpcodeSize::Wide16>(value, func);
else
setScope<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setScope(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setFunctionDecl(unsigned value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setFunctionDecl<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setFunctionDecl<OpcodeSize::Wide16>(value, func);
else
setFunctionDecl<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setFunctionDecl(unsigned value, Functor func)
{
if (!Fits<unsigned, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 2 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<unsigned, size>::convert(value);
}
VirtualRegister m_dst;
VirtualRegister m_scope;
unsigned m_functionDecl;
};
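// set_function_name function, name
// Applies the ECMAScript SetFunctionName operation: assigns the value in `name`
// as the name of the function object in `function`, as needed for computed
// property names. (Informal summary; see BytecodeList.rb.)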
struct OpSetFunctionName : public Instruction {
static constexpr OpcodeID opcodeID = op_set_function_name;
static constexpr size_t length = 3;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister function, VirtualRegister name)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, function, name);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister function, VirtualRegister name)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, function, name);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister function, VirtualRegister name)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, function, name))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, function, name))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, function, name);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& function, VirtualRegister& name)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<OpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(function)
&& Fits<VirtualRegister, __size>::check(name)
&& (__size == OpcodeSize::Wide16 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister function, VirtualRegister name)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, function, name)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide32));
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(function));
gen->write(Fits<VirtualRegister, __size>::convert(name));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**set_function_name"[2 - __sizeShiftAmount]);
dumper->dumpOperand("function", m_function, true);
dumper->dumpOperand("name", m_name, false);
}
OpSetFunctionName(const uint8_t* stream)
: m_function(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_name(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[1]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpSetFunctionName(const uint16_t* stream)
: m_function(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_name(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[1]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpSetFunctionName(const uint32_t* stream)
: m_function(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_name(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[1]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static OpSetFunctionName decode(const uint8_t* stream)
{
// `stream` points at the wide prefix (if any) or the opcode byte; the
// constructors expect a pointer to the first operand, so those bytes are skipped.
if (*stream == op_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == op_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setFunction(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setFunction<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setFunction<OpcodeSize::Wide16>(value, func);
else
setFunction<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setFunction(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setName(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setName<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setName<OpcodeSize::Wide16>(value, func);
else
setName<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setName(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
VirtualRegister m_function;
VirtualRegister m_name;
};
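// ret value
// Returns `value` from the current call frame.
//
// Emission sketch (`generator` and `result` are illustrative names, not part of
// this file): OpRet::emit(generator, result); the call starts narrow and
// transparently re-emits as wide16 or wide32 when the operand does not fit.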
struct OpRet : public Instruction {
static constexpr OpcodeID opcodeID = op_ret;
static constexpr size_t length = 2;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister value)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, value);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister value)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, value);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister value)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, value))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, value))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, value);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& value)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<OpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(value)
&& (__size == OpcodeSize::Wide16 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister value)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, value)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide32));
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(value));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**ret"[2 - __sizeShiftAmount]);
dumper->dumpOperand("value", m_value, true);
}
OpRet(const uint8_t* stream)
: m_value(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpRet(const uint16_t* stream)
: m_value(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpRet(const uint32_t* stream)
: m_value(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static OpRet decode(const uint8_t* stream)
{
// `stream` points at the wide prefix (if any) or the opcode byte; the
// constructors expect a pointer to the first operand, so those bytes are skipped.
if (*stream == op_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == op_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setValue(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setValue<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setValue<OpcodeSize::Wide16>(value, func);
else
setValue<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setValue(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
VirtualRegister m_value;
};
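// strcat dst, src, count
// Concatenates a contiguous run of `count` registers beginning at `src` into a
// single string stored in `dst`; used by the bytecode generator for multi-part
// string concatenation. (Informal summary; see BytecodeList.rb.)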
struct OpStrcat : public Instruction {
static constexpr OpcodeID opcodeID = op_strcat;
static constexpr size_t length = 4;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister src, int count)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, dst, src, count);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister src, int count)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, dst, src, count);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister src, int count)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, dst, src, count))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, dst, src, count))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, dst, src, count);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& dst, VirtualRegister& src, int& count)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<OpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(dst)
&& Fits<VirtualRegister, __size>::check(src)
&& Fits<int, __size>::check(count)
&& (__size == OpcodeSize::Wide16 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister src, int count)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, dst, src, count)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide32));
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(dst));
gen->write(Fits<VirtualRegister, __size>::convert(src));
gen->write(Fits<int, __size>::convert(count));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**strcat"[2 - __sizeShiftAmount]);
dumper->dumpOperand("dst", m_dst, true);
dumper->dumpOperand("src", m_src, false);
dumper->dumpOperand("count", m_count, false);
}
OpStrcat(const uint8_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_src(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[1]))
, m_count(Fits<int, OpcodeSize::Narrow>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpStrcat(const uint16_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_src(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[1]))
, m_count(Fits<int, OpcodeSize::Wide16>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpStrcat(const uint32_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_src(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[1]))
, m_count(Fits<int, OpcodeSize::Wide32>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static OpStrcat decode(const uint8_t* stream)
{
// Hand the constructor a pointer to the first operand, skipping the opcode byte and any wide-size prefix.
if (*stream == op_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == op_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setDst<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setDst<OpcodeSize::Wide16>(value, func);
else
setDst<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setSrc(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setSrc<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setSrc<OpcodeSize::Wide16>(value, func);
else
setSrc<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setSrc(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setCount(int value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setCount<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setCount<OpcodeSize::Wide16>(value, func);
else
setCount<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setCount(int value, Functor func)
{
if (!Fits<int, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 2 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<int, size>::convert(value);
}
VirtualRegister m_dst;
VirtualRegister m_src;
int m_count;
};
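// op_to_primitive: converts `src` to a primitive value (ToPrimitive, no hint)
// and stores the result in `dst`; values that are already primitive pass
// through unchanged.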
struct OpToPrimitive : public Instruction {
static constexpr OpcodeID opcodeID = op_to_primitive;
static constexpr size_t length = 3;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister src)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, dst, src);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister src)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, dst, src);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister src)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, dst, src))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, dst, src))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, dst, src);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& dst, VirtualRegister& src)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<OpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(dst)
&& Fits<VirtualRegister, __size>::check(src)
&& (__size == OpcodeSize::Wide16 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister src)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, dst, src)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide32));
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(dst));
gen->write(Fits<VirtualRegister, __size>::convert(src));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**to_primitive"[2 - __sizeShiftAmount]);
dumper->dumpOperand("dst", m_dst, true);
dumper->dumpOperand("src", m_src, false);
}
OpToPrimitive(const uint8_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_src(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[1]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpToPrimitive(const uint16_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_src(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[1]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpToPrimitive(const uint32_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_src(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[1]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static OpToPrimitive decode(const uint8_t* stream)
{
// Hand the constructor a pointer to the first operand, skipping the opcode byte and any wide-size prefix.
if (*stream == op_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == op_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setDst<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setDst<OpcodeSize::Wide16>(value, func);
else
setDst<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setSrc(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setSrc<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setSrc<OpcodeSize::Wide16>(value, func);
else
setSrc<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setSrc(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
VirtualRegister m_dst;
VirtualRegister m_src;
};
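// op_to_property_key: converts `src` to a value usable as a property key
// (a string or symbol, per ToPropertyKey) and stores it in `dst`.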
struct OpToPropertyKey : public Instruction {
static constexpr OpcodeID opcodeID = op_to_property_key;
static constexpr size_t length = 3;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister src)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, dst, src);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister src)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, dst, src);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister src)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, dst, src))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, dst, src))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, dst, src);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& dst, VirtualRegister& src)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<OpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(dst)
&& Fits<VirtualRegister, __size>::check(src)
&& (__size == OpcodeSize::Wide16 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister src)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, dst, src)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide32));
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(dst));
gen->write(Fits<VirtualRegister, __size>::convert(src));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**to_property_key"[2 - __sizeShiftAmount]);
dumper->dumpOperand("dst", m_dst, true);
dumper->dumpOperand("src", m_src, false);
}
OpToPropertyKey(const uint8_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_src(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[1]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpToPropertyKey(const uint16_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_src(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[1]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpToPropertyKey(const uint32_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_src(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[1]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static OpToPropertyKey decode(const uint8_t* stream)
{
// Hand the constructor a pointer to the first operand, skipping the opcode byte and any wide-size prefix.
if (*stream == op_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == op_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setDst<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setDst<OpcodeSize::Wide16>(value, func);
else
setDst<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setSrc(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setSrc<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setSrc<OpcodeSize::Wide16>(value, func);
else
setSrc<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setSrc(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
VirtualRegister m_dst;
VirtualRegister m_src;
};
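// op_put_to_arguments: stores `value` into slot `index` of the arguments
// object held in the `arguments` register.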
struct OpPutToArguments : public Instruction {
static constexpr OpcodeID opcodeID = op_put_to_arguments;
static constexpr size_t length = 4;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister arguments, unsigned index, VirtualRegister value)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, arguments, index, value);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister arguments, unsigned index, VirtualRegister value)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, arguments, index, value);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister arguments, unsigned index, VirtualRegister value)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, arguments, index, value))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, arguments, index, value))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, arguments, index, value);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& arguments, unsigned& index, VirtualRegister& value)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<OpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(arguments)
&& Fits<unsigned, __size>::check(index)
&& Fits<VirtualRegister, __size>::check(value)
&& (__size == OpcodeSize::Wide16 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister arguments, unsigned index, VirtualRegister value)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, arguments, index, value)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide32));
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(arguments));
gen->write(Fits<unsigned, __size>::convert(index));
gen->write(Fits<VirtualRegister, __size>::convert(value));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**put_to_arguments"[2 - __sizeShiftAmount]);
dumper->dumpOperand("arguments", m_arguments, true);
dumper->dumpOperand("index", m_index, false);
dumper->dumpOperand("value", m_value, false);
}
OpPutToArguments(const uint8_t* stream)
: m_arguments(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_index(Fits<unsigned, OpcodeSize::Narrow>::convert(stream[1]))
, m_value(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpPutToArguments(const uint16_t* stream)
: m_arguments(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_index(Fits<unsigned, OpcodeSize::Wide16>::convert(stream[1]))
, m_value(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpPutToArguments(const uint32_t* stream)
: m_arguments(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_index(Fits<unsigned, OpcodeSize::Wide32>::convert(stream[1]))
, m_value(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static OpPutToArguments decode(const uint8_t* stream)
{
// Hand the constructor a pointer to the first operand, skipping the opcode byte and any wide-size prefix.
if (*stream == op_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == op_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setArguments(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setArguments<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setArguments<OpcodeSize::Wide16>(value, func);
else
setArguments<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setArguments(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setIndex(unsigned value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setIndex<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setIndex<OpcodeSize::Wide16>(value, func);
else
setIndex<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setIndex(unsigned value, Functor func)
{
if (!Fits<unsigned, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<unsigned, size>::convert(value);
}
template<typename Functor>
void setValue(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setValue<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setValue<OpcodeSize::Wide16>(value, func);
else
setValue<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setValue(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 2 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
VirtualRegister m_arguments;
unsigned m_index;
VirtualRegister m_value;
};
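// op_push_with_scope: wraps the object in `newScope` in a with-scope whose
// parent is `currentScope` and stores the new scope in `dst`; this implements
// `with (...)` statements.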
struct OpPushWithScope : public Instruction {
static constexpr OpcodeID opcodeID = op_push_with_scope;
static constexpr size_t length = 4;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister currentScope, VirtualRegister newScope)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, dst, currentScope, newScope);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister currentScope, VirtualRegister newScope)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, dst, currentScope, newScope);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister currentScope, VirtualRegister newScope)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, dst, currentScope, newScope))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, dst, currentScope, newScope))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, dst, currentScope, newScope);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& dst, VirtualRegister& currentScope, VirtualRegister& newScope)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<OpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(dst)
&& Fits<VirtualRegister, __size>::check(currentScope)
&& Fits<VirtualRegister, __size>::check(newScope)
&& (__size == OpcodeSize::Wide16 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister currentScope, VirtualRegister newScope)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, dst, currentScope, newScope)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide32));
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(dst));
gen->write(Fits<VirtualRegister, __size>::convert(currentScope));
gen->write(Fits<VirtualRegister, __size>::convert(newScope));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**push_with_scope"[2 - __sizeShiftAmount]);
dumper->dumpOperand("dst", m_dst, true);
dumper->dumpOperand("currentScope", m_currentScope, false);
dumper->dumpOperand("newScope", m_newScope, false);
}
OpPushWithScope(const uint8_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_currentScope(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[1]))
, m_newScope(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpPushWithScope(const uint16_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_currentScope(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[1]))
, m_newScope(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpPushWithScope(const uint32_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_currentScope(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[1]))
, m_newScope(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static OpPushWithScope decode(const uint8_t* stream)
{
// Hand the constructor a pointer to the first operand, skipping the opcode byte and any wide-size prefix.
if (*stream == op_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == op_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setDst<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setDst<OpcodeSize::Wide16>(value, func);
else
setDst<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setCurrentScope(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setCurrentScope<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setCurrentScope<OpcodeSize::Wide16>(value, func);
else
setCurrentScope<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setCurrentScope(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setNewScope(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setNewScope<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setNewScope<OpcodeSize::Wide16>(value, func);
else
setNewScope<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setNewScope(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 2 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
VirtualRegister m_dst;
VirtualRegister m_currentScope;
VirtualRegister m_newScope;
};
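// op_create_lexical_environment: allocates a new lexical environment whose
// parent is `scope` and whose shape is described by `symbolTable`, with every
// slot initialized to `initialValue` (typically undefined or the TDZ empty
// value), and stores it in `dst`.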
struct OpCreateLexicalEnvironment : public Instruction {
static constexpr OpcodeID opcodeID = op_create_lexical_environment;
static constexpr size_t length = 5;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister scope, VirtualRegister symbolTable, VirtualRegister initialValue)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, dst, scope, symbolTable, initialValue);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister scope, VirtualRegister symbolTable, VirtualRegister initialValue)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, dst, scope, symbolTable, initialValue);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister scope, VirtualRegister symbolTable, VirtualRegister initialValue)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, dst, scope, symbolTable, initialValue))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, dst, scope, symbolTable, initialValue))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, dst, scope, symbolTable, initialValue);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& dst, VirtualRegister& scope, VirtualRegister& symbolTable, VirtualRegister& initialValue)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<OpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(dst)
&& Fits<VirtualRegister, __size>::check(scope)
&& Fits<VirtualRegister, __size>::check(symbolTable)
&& Fits<VirtualRegister, __size>::check(initialValue)
&& (__size == OpcodeSize::Wide16 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister scope, VirtualRegister symbolTable, VirtualRegister initialValue)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, dst, scope, symbolTable, initialValue)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide32));
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(dst));
gen->write(Fits<VirtualRegister, __size>::convert(scope));
gen->write(Fits<VirtualRegister, __size>::convert(symbolTable));
gen->write(Fits<VirtualRegister, __size>::convert(initialValue));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**create_lexical_environment"[2 - __sizeShiftAmount]);
dumper->dumpOperand("dst", m_dst, true);
dumper->dumpOperand("scope", m_scope, false);
dumper->dumpOperand("symbolTable", m_symbolTable, false);
dumper->dumpOperand("initialValue", m_initialValue, false);
}
OpCreateLexicalEnvironment(const uint8_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_scope(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[1]))
, m_symbolTable(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[2]))
, m_initialValue(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[3]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpCreateLexicalEnvironment(const uint16_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_scope(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[1]))
, m_symbolTable(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[2]))
, m_initialValue(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[3]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpCreateLexicalEnvironment(const uint32_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_scope(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[1]))
, m_symbolTable(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[2]))
, m_initialValue(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[3]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static OpCreateLexicalEnvironment decode(const uint8_t* stream)
{
// Hand the constructor a pointer to the first operand, skipping the opcode byte and any wide-size prefix.
if (*stream == op_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == op_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setDst<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setDst<OpcodeSize::Wide16>(value, func);
else
setDst<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setScope(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setScope<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setScope<OpcodeSize::Wide16>(value, func);
else
setScope<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setScope(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setSymbolTable(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setSymbolTable<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setSymbolTable<OpcodeSize::Wide16>(value, func);
else
setSymbolTable<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setSymbolTable(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 2 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setInitialValue(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setInitialValue<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setInitialValue<OpcodeSize::Wide16>(value, func);
else
setInitialValue<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setInitialValue(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 3 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
VirtualRegister m_dst;
VirtualRegister m_scope;
VirtualRegister m_symbolTable;
VirtualRegister m_initialValue;
};
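// op_create_generator_frame_environment: like op_create_lexical_environment,
// but for the environment that keeps a generator (or async function) frame's
// locals alive across suspensions; roughly, it lets the interpreter reuse an
// existing frame environment on resume instead of always allocating a new one.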
struct OpCreateGeneratorFrameEnvironment : public Instruction {
static constexpr OpcodeID opcodeID = op_create_generator_frame_environment;
static constexpr size_t length = 5;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister scope, VirtualRegister symbolTable, VirtualRegister initialValue)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, dst, scope, symbolTable, initialValue);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister scope, VirtualRegister symbolTable, VirtualRegister initialValue)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, dst, scope, symbolTable, initialValue);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister scope, VirtualRegister symbolTable, VirtualRegister initialValue)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, dst, scope, symbolTable, initialValue))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, dst, scope, symbolTable, initialValue))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, dst, scope, symbolTable, initialValue);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& dst, VirtualRegister& scope, VirtualRegister& symbolTable, VirtualRegister& initialValue)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<OpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(dst)
&& Fits<VirtualRegister, __size>::check(scope)
&& Fits<VirtualRegister, __size>::check(symbolTable)
&& Fits<VirtualRegister, __size>::check(initialValue)
&& (__size == OpcodeSize::Wide16 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister scope, VirtualRegister symbolTable, VirtualRegister initialValue)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, dst, scope, symbolTable, initialValue)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide32));
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(dst));
gen->write(Fits<VirtualRegister, __size>::convert(scope));
gen->write(Fits<VirtualRegister, __size>::convert(symbolTable));
gen->write(Fits<VirtualRegister, __size>::convert(initialValue));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**create_generator_frame_environment"[2 - __sizeShiftAmount]);
dumper->dumpOperand("dst", m_dst, true);
dumper->dumpOperand("scope", m_scope, false);
dumper->dumpOperand("symbolTable", m_symbolTable, false);
dumper->dumpOperand("initialValue", m_initialValue, false);
}
OpCreateGeneratorFrameEnvironment(const uint8_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_scope(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[1]))
, m_symbolTable(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[2]))
, m_initialValue(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[3]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpCreateGeneratorFrameEnvironment(const uint16_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_scope(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[1]))
, m_symbolTable(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[2]))
, m_initialValue(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[3]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpCreateGeneratorFrameEnvironment(const uint32_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_scope(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[1]))
, m_symbolTable(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[2]))
, m_initialValue(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[3]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static OpCreateGeneratorFrameEnvironment decode(const uint8_t* stream)
{
// Hand the constructor a pointer to the first operand, skipping the opcode byte and any wide-size prefix.
if (*stream == op_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == op_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setDst<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setDst<OpcodeSize::Wide16>(value, func);
else
setDst<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setScope(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setScope<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setScope<OpcodeSize::Wide16>(value, func);
else
setScope<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setScope(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setSymbolTable(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setSymbolTable<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setSymbolTable<OpcodeSize::Wide16>(value, func);
else
setSymbolTable<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setSymbolTable(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 2 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setInitialValue(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setInitialValue<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setInitialValue<OpcodeSize::Wide16>(value, func);
else
setInitialValue<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setInitialValue(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 3 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
VirtualRegister m_dst;
VirtualRegister m_scope;
VirtualRegister m_symbolTable;
VirtualRegister m_initialValue;
};
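// op_get_parent_scope: loads the parent (next outer) scope of `scope` into `dst`.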
struct OpGetParentScope : public Instruction {
static constexpr OpcodeID opcodeID = op_get_parent_scope;
static constexpr size_t length = 3;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister scope)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, dst, scope);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister scope)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, dst, scope);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister scope)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, dst, scope))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, dst, scope))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, dst, scope);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& dst, VirtualRegister& scope)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<OpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(dst)
&& Fits<VirtualRegister, __size>::check(scope)
&& (__size == OpcodeSize::Wide16 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister scope)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, dst, scope)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide32));
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(dst));
gen->write(Fits<VirtualRegister, __size>::convert(scope));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**get_parent_scope"[2 - __sizeShiftAmount]);
dumper->dumpOperand("dst", m_dst, true);
dumper->dumpOperand("scope", m_scope, false);
}
OpGetParentScope(const uint8_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_scope(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[1]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpGetParentScope(const uint16_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_scope(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[1]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpGetParentScope(const uint32_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_scope(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[1]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static OpGetParentScope decode(const uint8_t* stream)
{
// Hand the constructor a pointer to the first operand, skipping the opcode byte and any wide-size prefix.
if (*stream == op_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == op_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setDst<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setDst<OpcodeSize::Wide16>(value, func);
else
setDst<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setScope(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setScope<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setScope<OpcodeSize::Wide16>(value, func);
else
setScope<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setScope(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
VirtualRegister m_dst;
VirtualRegister m_scope;
};
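// op_throw: throws the value in `value` as a JavaScript exception, unwinding
// to the nearest enclosing handler.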
struct OpThrow : public Instruction {
static constexpr OpcodeID opcodeID = op_throw;
static constexpr size_t length = 2;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister value)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, value);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister value)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, value);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister value)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, value))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, value))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, value);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& value)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<OpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(value)
&& (__size == OpcodeSize::Wide16 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister value)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, value)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide32));
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(value));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**throw"[2 - __sizeShiftAmount]);
dumper->dumpOperand("value", m_value, true);
}
OpThrow(const uint8_t* stream)
: m_value(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpThrow(const uint16_t* stream)
: m_value(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpThrow(const uint32_t* stream)
: m_value(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static OpThrow decode(const uint8_t* stream)
{
// Hand the constructor a pointer to the first operand, skipping the opcode byte and any wide-size prefix.
if (*stream == op_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == op_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setValue(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setValue<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setValue<OpcodeSize::Wide16>(value, func);
else
setValue<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setValue(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
VirtualRegister m_value;
};
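// op_throw_static_error: creates an error of kind `errorType` with the constant
// string `message` and throws it; emitted for errors the bytecode generator
// detects statically (for example, assignment to a const binding).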
struct OpThrowStaticError : public Instruction {
static constexpr OpcodeID opcodeID = op_throw_static_error;
static constexpr size_t length = 3;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister message, ErrorTypeWithExtension errorType)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, message, errorType);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister message, ErrorTypeWithExtension errorType)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, message, errorType);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister message, ErrorTypeWithExtension errorType)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, message, errorType))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, message, errorType))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, message, errorType);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& message, ErrorTypeWithExtension& errorType)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<OpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(message)
&& Fits<ErrorTypeWithExtension, __size>::check(errorType)
&& (__size == OpcodeSize::Wide16 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister message, ErrorTypeWithExtension errorType)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, message, errorType)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide32));
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(message));
gen->write(Fits<ErrorTypeWithExtension, __size>::convert(errorType));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**throw_static_error"[2 - __sizeShiftAmount]);
dumper->dumpOperand("message", m_message, true);
dumper->dumpOperand("errorType", m_errorType, false);
}
OpThrowStaticError(const uint8_t* stream)
: m_message(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_errorType(Fits<ErrorTypeWithExtension, OpcodeSize::Narrow>::convert(stream[1]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpThrowStaticError(const uint16_t* stream)
: m_message(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_errorType(Fits<ErrorTypeWithExtension, OpcodeSize::Wide16>::convert(stream[1]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpThrowStaticError(const uint32_t* stream)
: m_message(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_errorType(Fits<ErrorTypeWithExtension, OpcodeSize::Wide32>::convert(stream[1]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static OpThrowStaticError decode(const uint8_t* stream)
{
// stream points at the wide prefix (if any) or at the opcode; the constructor receives a pointer to the first operand, with the prefix and opcode skipped.
if (*stream == op_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == op_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setMessage(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setMessage<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setMessage<OpcodeSize::Wide16>(value, func);
else
setMessage<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setMessage(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setErrorType(ErrorTypeWithExtension value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setErrorType<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setErrorType<OpcodeSize::Wide16>(value, func);
else
setErrorType<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setErrorType(ErrorTypeWithExtension value, Functor func)
{
if (!Fits<ErrorTypeWithExtension, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<ErrorTypeWithExtension, size>::convert(value);
}
VirtualRegister m_message;
ErrorTypeWithExtension m_errorType;
};
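// op_debug: presumably invokes the debugger hook identified by DebugHookType
// (statement boundaries, call-frame entry/exit, and the like); hasBreakpoint
// records whether a breakpoint is associated with this site. Semantics are
// inferred from the operand names; this header only defines the encoding.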
struct OpDebug : public Instruction {
static constexpr OpcodeID opcodeID = op_debug;
static constexpr size_t length = 3;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, DebugHookType debugHookType, bool hasBreakpoint)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, debugHookType, hasBreakpoint);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, DebugHookType debugHookType, bool hasBreakpoint)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, debugHookType, hasBreakpoint);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, DebugHookType debugHookType, bool hasBreakpoint)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, debugHookType, hasBreakpoint))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, debugHookType, hasBreakpoint))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, debugHookType, hasBreakpoint);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, DebugHookType& debugHookType, bool& hasBreakpoint)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<OpcodeID, __size>::check(opcodeID)
&& Fits<DebugHookType, __size>::check(debugHookType)
&& Fits<bool, __size>::check(hasBreakpoint)
&& (__size == OpcodeSize::Wide16 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, DebugHookType debugHookType, bool hasBreakpoint)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, debugHookType, hasBreakpoint)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide32));
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<DebugHookType, __size>::convert(debugHookType));
gen->write(Fits<bool, __size>::convert(hasBreakpoint));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**debug"[2 - __sizeShiftAmount]);
dumper->dumpOperand("debugHookType", m_debugHookType, true);
dumper->dumpOperand("hasBreakpoint", m_hasBreakpoint, false);
}
OpDebug(const uint8_t* stream)
: m_debugHookType(Fits<DebugHookType, OpcodeSize::Narrow>::convert(stream[0]))
, m_hasBreakpoint(Fits<bool, OpcodeSize::Narrow>::convert(stream[1]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpDebug(const uint16_t* stream)
: m_debugHookType(Fits<DebugHookType, OpcodeSize::Wide16>::convert(stream[0]))
, m_hasBreakpoint(Fits<bool, OpcodeSize::Wide16>::convert(stream[1]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpDebug(const uint32_t* stream)
: m_debugHookType(Fits<DebugHookType, OpcodeSize::Wide32>::convert(stream[0]))
, m_hasBreakpoint(Fits<bool, OpcodeSize::Wide32>::convert(stream[1]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static OpDebug decode(const uint8_t* stream)
{
// stream points at the wide prefix (if any) or at the opcode; the constructor receives a pointer to the first operand, with the prefix and opcode skipped.
if (*stream == op_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == op_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setDebugHookType(DebugHookType value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setDebugHookType<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setDebugHookType<OpcodeSize::Wide16>(value, func);
else
setDebugHookType<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setDebugHookType(DebugHookType value, Functor func)
{
if (!Fits<DebugHookType, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<DebugHookType, size>::convert(value);
}
template<typename Functor>
void setHasBreakpoint(bool value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setHasBreakpoint<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setHasBreakpoint<OpcodeSize::Wide16>(value, func);
else
setHasBreakpoint<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setHasBreakpoint(bool value, Functor func)
{
if (!Fits<bool, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<bool, size>::convert(value);
}
DebugHookType m_debugHookType;
bool m_hasBreakpoint;
};
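// op_end: presumably terminates evaluation of the current program-level code
// block, yielding the contents of the value register as the completion value
// (inferred from the operand name; the interpreter defines the behavior).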
struct OpEnd : public Instruction {
static constexpr OpcodeID opcodeID = op_end;
static constexpr size_t length = 2;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister value)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, value);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister value)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, value);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister value)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, value))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, value))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, value);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& value)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<OpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(value)
&& (__size == OpcodeSize::Wide16 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister value)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, value)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide32));
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(value));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**end"[2 - __sizeShiftAmount]);
dumper->dumpOperand("value", m_value, true);
}
OpEnd(const uint8_t* stream)
: m_value(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpEnd(const uint16_t* stream)
: m_value(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpEnd(const uint32_t* stream)
: m_value(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static OpEnd decode(const uint8_t* stream)
{
// stream points at the wide prefix (if any) or at the opcode; the constructor receives a pointer to the first operand, with the prefix and opcode skipped.
if (*stream == op_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == op_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setValue(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setValue<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setValue<OpcodeSize::Wide16>(value, func);
else
setValue<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setValue(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
VirtualRegister m_value;
};
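// The remaining ops in this section back for-in enumeration.
// op_get_enumerable_length: presumably stores into dst the length used to
// enumerate base's indexed properties.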
struct OpGetEnumerableLength : public Instruction {
static constexpr OpcodeID opcodeID = op_get_enumerable_length;
static constexpr size_t length = 3;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister base)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, dst, base);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister base)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, dst, base);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister base)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, dst, base))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, dst, base))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, dst, base);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& dst, VirtualRegister& base)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<OpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(dst)
&& Fits<VirtualRegister, __size>::check(base)
&& (__size == OpcodeSize::Wide16 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister base)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, dst, base)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide32));
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(dst));
gen->write(Fits<VirtualRegister, __size>::convert(base));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**get_enumerable_length"[2 - __sizeShiftAmount]);
dumper->dumpOperand("dst", m_dst, true);
dumper->dumpOperand("base", m_base, false);
}
OpGetEnumerableLength(const uint8_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_base(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[1]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpGetEnumerableLength(const uint16_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_base(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[1]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpGetEnumerableLength(const uint32_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_base(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[1]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static OpGetEnumerableLength decode(const uint8_t* stream)
{
// stream points at the wide prefix (if any) or at the opcode; the constructor receives a pointer to the first operand, with the prefix and opcode skipped.
if (*stream == op_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == op_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setDst<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setDst<OpcodeSize::Wide16>(value, func);
else
setDst<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setBase(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setBase<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setBase<OpcodeSize::Wide16>(value, func);
else
setBase<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setBase(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
VirtualRegister m_dst;
VirtualRegister m_base;
};
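// op_has_enumerable_structure_property: presumably the fast-path for-in test,
// checking via the cached enumerator whether base (with an unchanged
// Structure) still has an enumerable property named by the property register.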
struct OpHasEnumerableStructureProperty : public Instruction {
static constexpr OpcodeID opcodeID = op_has_enumerable_structure_property;
static constexpr size_t length = 5;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister base, VirtualRegister property, VirtualRegister enumerator)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, dst, base, property, enumerator);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister base, VirtualRegister property, VirtualRegister enumerator)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, dst, base, property, enumerator);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister base, VirtualRegister property, VirtualRegister enumerator)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, dst, base, property, enumerator))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, dst, base, property, enumerator))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, dst, base, property, enumerator);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& dst, VirtualRegister& base, VirtualRegister& property, VirtualRegister& enumerator)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<OpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(dst)
&& Fits<VirtualRegister, __size>::check(base)
&& Fits<VirtualRegister, __size>::check(property)
&& Fits<VirtualRegister, __size>::check(enumerator)
&& (__size == OpcodeSize::Wide16 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister base, VirtualRegister property, VirtualRegister enumerator)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, dst, base, property, enumerator)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide32));
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(dst));
gen->write(Fits<VirtualRegister, __size>::convert(base));
gen->write(Fits<VirtualRegister, __size>::convert(property));
gen->write(Fits<VirtualRegister, __size>::convert(enumerator));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**has_enumerable_structure_property"[2 - __sizeShiftAmount]);
dumper->dumpOperand("dst", m_dst, true);
dumper->dumpOperand("base", m_base, false);
dumper->dumpOperand("property", m_property, false);
dumper->dumpOperand("enumerator", m_enumerator, false);
}
OpHasEnumerableStructureProperty(const uint8_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_base(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[1]))
, m_property(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[2]))
, m_enumerator(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[3]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpHasEnumerableStructureProperty(const uint16_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_base(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[1]))
, m_property(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[2]))
, m_enumerator(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[3]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpHasEnumerableStructureProperty(const uint32_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_base(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[1]))
, m_property(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[2]))
, m_enumerator(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[3]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static OpHasEnumerableStructureProperty decode(const uint8_t* stream)
{
// stream points at the wide prefix (if any) or at the opcode; the constructor receives a pointer to the first operand, with the prefix and opcode skipped.
if (*stream == op_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == op_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setDst<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setDst<OpcodeSize::Wide16>(value, func);
else
setDst<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setBase(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setBase<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setBase<OpcodeSize::Wide16>(value, func);
else
setBase<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setBase(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setProperty(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setProperty<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setProperty<OpcodeSize::Wide16>(value, func);
else
setProperty<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setProperty(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 2 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setEnumerator(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setEnumerator<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setEnumerator<OpcodeSize::Wide16>(value, func);
else
setEnumerator<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setEnumerator(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 3 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
VirtualRegister m_dst;
VirtualRegister m_base;
VirtualRegister m_property;
VirtualRegister m_enumerator;
};
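// op_has_own_structure_property: like the op above, but presumably an
// own-property (HasOwnProperty-style) check against the enumerator's cached
// Structure.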
struct OpHasOwnStructureProperty : public Instruction {
static constexpr OpcodeID opcodeID = op_has_own_structure_property;
static constexpr size_t length = 5;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister base, VirtualRegister property, VirtualRegister enumerator)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, dst, base, property, enumerator);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister base, VirtualRegister property, VirtualRegister enumerator)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, dst, base, property, enumerator);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister base, VirtualRegister property, VirtualRegister enumerator)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, dst, base, property, enumerator))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, dst, base, property, enumerator))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, dst, base, property, enumerator);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& dst, VirtualRegister& base, VirtualRegister& property, VirtualRegister& enumerator)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<OpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(dst)
&& Fits<VirtualRegister, __size>::check(base)
&& Fits<VirtualRegister, __size>::check(property)
&& Fits<VirtualRegister, __size>::check(enumerator)
&& (__size == OpcodeSize::Wide16 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister base, VirtualRegister property, VirtualRegister enumerator)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, dst, base, property, enumerator)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide32));
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(dst));
gen->write(Fits<VirtualRegister, __size>::convert(base));
gen->write(Fits<VirtualRegister, __size>::convert(property));
gen->write(Fits<VirtualRegister, __size>::convert(enumerator));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**has_own_structure_property"[2 - __sizeShiftAmount]);
dumper->dumpOperand("dst", m_dst, true);
dumper->dumpOperand("base", m_base, false);
dumper->dumpOperand("property", m_property, false);
dumper->dumpOperand("enumerator", m_enumerator, false);
}
OpHasOwnStructureProperty(const uint8_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_base(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[1]))
, m_property(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[2]))
, m_enumerator(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[3]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpHasOwnStructureProperty(const uint16_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_base(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[1]))
, m_property(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[2]))
, m_enumerator(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[3]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpHasOwnStructureProperty(const uint32_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_base(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[1]))
, m_property(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[2]))
, m_enumerator(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[3]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static OpHasOwnStructureProperty decode(const uint8_t* stream)
{
// stream points at the wide prefix (if any) or at the opcode; the constructor receives a pointer to the first operand, with the prefix and opcode skipped.
if (*stream == op_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == op_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setDst<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setDst<OpcodeSize::Wide16>(value, func);
else
setDst<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setBase(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setBase<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setBase<OpcodeSize::Wide16>(value, func);
else
setBase<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setBase(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setProperty(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setProperty<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setProperty<OpcodeSize::Wide16>(value, func);
else
setProperty<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setProperty(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 2 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setEnumerator(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setEnumerator<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setEnumerator<OpcodeSize::Wide16>(value, func);
else
setEnumerator<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setEnumerator(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 3 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
VirtualRegister m_dst;
VirtualRegister m_base;
VirtualRegister m_property;
VirtualRegister m_enumerator;
};
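// op_in_structure_property: presumably implements the `in` test on the
// Structure fast path, again keyed off the enumerator operand.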
struct OpInStructureProperty : public Instruction {
static constexpr OpcodeID opcodeID = op_in_structure_property;
static constexpr size_t length = 5;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister base, VirtualRegister property, VirtualRegister enumerator)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, dst, base, property, enumerator);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister base, VirtualRegister property, VirtualRegister enumerator)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, dst, base, property, enumerator);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister base, VirtualRegister property, VirtualRegister enumerator)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, dst, base, property, enumerator))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, dst, base, property, enumerator))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, dst, base, property, enumerator);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& dst, VirtualRegister& base, VirtualRegister& property, VirtualRegister& enumerator)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<OpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(dst)
&& Fits<VirtualRegister, __size>::check(base)
&& Fits<VirtualRegister, __size>::check(property)
&& Fits<VirtualRegister, __size>::check(enumerator)
&& (__size == OpcodeSize::Wide16 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister base, VirtualRegister property, VirtualRegister enumerator)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, dst, base, property, enumerator)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide32));
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(dst));
gen->write(Fits<VirtualRegister, __size>::convert(base));
gen->write(Fits<VirtualRegister, __size>::convert(property));
gen->write(Fits<VirtualRegister, __size>::convert(enumerator));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**in_structure_property"[2 - __sizeShiftAmount]);
dumper->dumpOperand("dst", m_dst, true);
dumper->dumpOperand("base", m_base, false);
dumper->dumpOperand("property", m_property, false);
dumper->dumpOperand("enumerator", m_enumerator, false);
}
OpInStructureProperty(const uint8_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_base(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[1]))
, m_property(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[2]))
, m_enumerator(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[3]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpInStructureProperty(const uint16_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_base(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[1]))
, m_property(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[2]))
, m_enumerator(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[3]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpInStructureProperty(const uint32_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_base(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[1]))
, m_property(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[2]))
, m_enumerator(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[3]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static OpInStructureProperty decode(const uint8_t* stream)
{
// stream points at the wide prefix (if any) or at the opcode; the constructor receives a pointer to the first operand, with the prefix and opcode skipped.
if (*stream == op_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == op_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setDst<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setDst<OpcodeSize::Wide16>(value, func);
else
setDst<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setBase(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setBase<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setBase<OpcodeSize::Wide16>(value, func);
else
setBase<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setBase(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setProperty(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setProperty<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setProperty<OpcodeSize::Wide16>(value, func);
else
setProperty<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setProperty(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 2 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setEnumerator(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setEnumerator<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setEnumerator<OpcodeSize::Wide16>(value, func);
else
setEnumerator<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setEnumerator(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 3 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
VirtualRegister m_dst;
VirtualRegister m_base;
VirtualRegister m_property;
VirtualRegister m_enumerator;
};
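// op_has_enumerable_property: presumably the generic (non-Structure)
// slow-path test of whether base has an enumerable property named by the
// property register, with the result stored into dst.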
struct OpHasEnumerableProperty : public Instruction {
static constexpr OpcodeID opcodeID = op_has_enumerable_property;
static constexpr size_t length = 4;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister base, VirtualRegister property)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, dst, base, property);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister base, VirtualRegister property)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, dst, base, property);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister base, VirtualRegister property)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, dst, base, property))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, dst, base, property))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, dst, base, property);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& dst, VirtualRegister& base, VirtualRegister& property)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<OpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(dst)
&& Fits<VirtualRegister, __size>::check(base)
&& Fits<VirtualRegister, __size>::check(property)
&& (__size == OpcodeSize::Wide16 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister base, VirtualRegister property)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, dst, base, property)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide32));
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(dst));
gen->write(Fits<VirtualRegister, __size>::convert(base));
gen->write(Fits<VirtualRegister, __size>::convert(property));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**has_enumerable_property"[2 - __sizeShiftAmount]);
dumper->dumpOperand("dst", m_dst, true);
dumper->dumpOperand("base", m_base, false);
dumper->dumpOperand("property", m_property, false);
}
OpHasEnumerableProperty(const uint8_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_base(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[1]))
, m_property(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpHasEnumerableProperty(const uint16_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_base(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[1]))
, m_property(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpHasEnumerableProperty(const uint32_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_base(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[1]))
, m_property(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static OpHasEnumerableProperty decode(const uint8_t* stream)
{
// stream points at the wide prefix (if any) or at the opcode; the constructor receives a pointer to the first operand, with the prefix and opcode skipped.
if (*stream == op_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == op_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setDst<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setDst<OpcodeSize::Wide16>(value, func);
else
setDst<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setBase(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setBase<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setBase<OpcodeSize::Wide16>(value, func);
else
setBase<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setBase(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setProperty(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setProperty<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setProperty<OpcodeSize::Wide16>(value, func);
else
setProperty<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setProperty(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 2 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
VirtualRegister m_dst;
VirtualRegister m_base;
VirtualRegister m_property;
};
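// op_get_property_enumerator: presumably stores into dst a property-name
// enumerator for base, which drives the subsequent for-in loop.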
struct OpGetPropertyEnumerator : public Instruction {
static constexpr OpcodeID opcodeID = op_get_property_enumerator;
static constexpr size_t length = 3;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister base)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, dst, base);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister base)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, dst, base);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister base)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, dst, base))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, dst, base))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, dst, base);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& dst, VirtualRegister& base)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<OpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(dst)
&& Fits<VirtualRegister, __size>::check(base)
&& (__size == OpcodeSize::Wide16 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister base)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, dst, base)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide32));
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(dst));
gen->write(Fits<VirtualRegister, __size>::convert(base));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**get_property_enumerator"[2 - __sizeShiftAmount]);
dumper->dumpOperand("dst", m_dst, true);
dumper->dumpOperand("base", m_base, false);
}
OpGetPropertyEnumerator(const uint8_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_base(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[1]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpGetPropertyEnumerator(const uint16_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_base(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[1]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpGetPropertyEnumerator(const uint32_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_base(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[1]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static OpGetPropertyEnumerator decode(const uint8_t* stream)
{
// stream points at the wide prefix (if any) or at the opcode; the constructor receives a pointer to the first operand, with the prefix and opcode skipped.
if (*stream == op_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == op_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setDst<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setDst<OpcodeSize::Wide16>(value, func);
else
setDst<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setBase(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setBase<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setBase<OpcodeSize::Wide16>(value, func);
else
setBase<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setBase(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
VirtualRegister m_dst;
VirtualRegister m_base;
};
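// Usage sketch (illustrative, not part of the generated interface): emitting
// op_get_property_enumerator, which for-in lowering uses to fetch the property
// name enumerator for `base`. `gen`, `dst`, and `base` are assumed to come from
// the surrounding BytecodeGenerator context.
template<typename BytecodeGenerator>
void exampleEmitGetPropertyEnumerator(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister base)
{
    // Picks the smallest encoding that fits: Narrow, then Wide16, then Wide32.
    OpGetPropertyEnumerator::emit(gen, dst, base);
}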
struct OpEnumeratorStructurePname : public Instruction {
static constexpr OpcodeID opcodeID = op_enumerator_structure_pname;
static constexpr size_t length = 4;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister enumerator, VirtualRegister index)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, dst, enumerator, index);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister enumerator, VirtualRegister index)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, dst, enumerator, index);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister enumerator, VirtualRegister index)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, dst, enumerator, index))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, dst, enumerator, index))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, dst, enumerator, index);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& dst, VirtualRegister& enumerator, VirtualRegister& index)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<OpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(dst)
&& Fits<VirtualRegister, __size>::check(enumerator)
&& Fits<VirtualRegister, __size>::check(index)
&& (__size == OpcodeSize::Wide16 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister enumerator, VirtualRegister index)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, dst, enumerator, index)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide32));
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(dst));
gen->write(Fits<VirtualRegister, __size>::convert(enumerator));
gen->write(Fits<VirtualRegister, __size>::convert(index));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**enumerator_structure_pname"[2 - __sizeShiftAmount]);
dumper->dumpOperand("dst", m_dst, true);
dumper->dumpOperand("enumerator", m_enumerator, false);
dumper->dumpOperand("index", m_index, false);
}
OpEnumeratorStructurePname(const uint8_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_enumerator(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[1]))
, m_index(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpEnumeratorStructurePname(const uint16_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_enumerator(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[1]))
, m_index(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpEnumeratorStructurePname(const uint32_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_enumerator(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[1]))
, m_index(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static OpEnumeratorStructurePname decode(const uint8_t* stream)
{
        // The constructor receives a pointer to the first operand; the opcode byte and any wide prefix have already been skipped.
if (*stream == op_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == op_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setDst<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setDst<OpcodeSize::Wide16>(value, func);
else
setDst<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setEnumerator(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setEnumerator<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setEnumerator<OpcodeSize::Wide16>(value, func);
else
setEnumerator<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setEnumerator(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setIndex(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setIndex<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setIndex<OpcodeSize::Wide16>(value, func);
else
setIndex<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setIndex(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 2 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
VirtualRegister m_dst;
VirtualRegister m_enumerator;
VirtualRegister m_index;
};
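// Usage sketch (illustrative only): decoding an op_enumerator_structure_pname
// back out of the instruction stream. `pc` is assumed to point at the start of
// the instruction (the wide prefix byte when present, otherwise the opcode byte).
inline OpEnumeratorStructurePname exampleDecodeStructurePname(const uint8_t* pc)
{
    // decode() skips the prefix/opcode and reads dst, enumerator, and index at
    // the width selected by the prefix.
    return OpEnumeratorStructurePname::decode(pc);
}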
struct OpEnumeratorGenericPname : public Instruction {
static constexpr OpcodeID opcodeID = op_enumerator_generic_pname;
static constexpr size_t length = 4;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister enumerator, VirtualRegister index)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, dst, enumerator, index);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister enumerator, VirtualRegister index)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, dst, enumerator, index);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister enumerator, VirtualRegister index)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, dst, enumerator, index))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, dst, enumerator, index))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, dst, enumerator, index);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& dst, VirtualRegister& enumerator, VirtualRegister& index)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<OpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(dst)
&& Fits<VirtualRegister, __size>::check(enumerator)
&& Fits<VirtualRegister, __size>::check(index)
&& (__size == OpcodeSize::Wide16 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister enumerator, VirtualRegister index)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, dst, enumerator, index)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide32));
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(dst));
gen->write(Fits<VirtualRegister, __size>::convert(enumerator));
gen->write(Fits<VirtualRegister, __size>::convert(index));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**enumerator_generic_pname"[2 - __sizeShiftAmount]);
dumper->dumpOperand("dst", m_dst, true);
dumper->dumpOperand("enumerator", m_enumerator, false);
dumper->dumpOperand("index", m_index, false);
}
OpEnumeratorGenericPname(const uint8_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_enumerator(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[1]))
, m_index(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpEnumeratorGenericPname(const uint16_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_enumerator(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[1]))
, m_index(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpEnumeratorGenericPname(const uint32_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_enumerator(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[1]))
, m_index(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static OpEnumeratorGenericPname decode(const uint8_t* stream)
{
        // The constructor receives a pointer to the first operand; the opcode byte and any wide prefix have already been skipped.
if (*stream == op_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == op_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setDst<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setDst<OpcodeSize::Wide16>(value, func);
else
setDst<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setEnumerator(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setEnumerator<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setEnumerator<OpcodeSize::Wide16>(value, func);
else
setEnumerator<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setEnumerator(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setIndex(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setIndex<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setIndex<OpcodeSize::Wide16>(value, func);
else
setIndex<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setIndex(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 2 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
VirtualRegister m_dst;
VirtualRegister m_enumerator;
VirtualRegister m_index;
};
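// Usage sketch: patching the index operand of an already-emitted instruction.
// `op` is assumed to point at the live instruction inside the bytecode stream
// (the setters write through `this`), not at a decoded copy. Illustrative only.
inline void exampleRetargetGenericPnameIndex(OpEnumeratorGenericPname* op, VirtualRegister newIndex)
{
    // The functor is consulted only when newIndex does not fit the current
    // encoding; this sketch simply keeps the old operand in that case.
    op->setIndex(newIndex, [op] { return op->m_index; });
}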
struct OpToIndexString : public Instruction {
static constexpr OpcodeID opcodeID = op_to_index_string;
static constexpr size_t length = 3;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister index)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, dst, index);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister index)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, dst, index);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister index)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, dst, index))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, dst, index))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, dst, index);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& dst, VirtualRegister& index)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<OpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(dst)
&& Fits<VirtualRegister, __size>::check(index)
&& (__size == OpcodeSize::Wide16 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister index)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, dst, index)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide32));
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(dst));
gen->write(Fits<VirtualRegister, __size>::convert(index));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**to_index_string"[2 - __sizeShiftAmount]);
dumper->dumpOperand("dst", m_dst, true);
dumper->dumpOperand("index", m_index, false);
}
OpToIndexString(const uint8_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_index(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[1]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpToIndexString(const uint16_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_index(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[1]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpToIndexString(const uint32_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_index(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[1]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static OpToIndexString decode(const uint8_t* stream)
{
        // The constructor receives a pointer to the first operand; the opcode byte and any wide prefix have already been skipped.
if (*stream == op_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == op_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setDst<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setDst<OpcodeSize::Wide16>(value, func);
else
setDst<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setIndex(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setIndex<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setIndex<OpcodeSize::Wide16>(value, func);
else
setIndex<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setIndex(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
VirtualRegister m_dst;
VirtualRegister m_index;
};
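// Usage sketch (illustrative, names assumed): op_to_index_string converts a
// for-in loop index into its string form. A specific encoding can also be
// forced; with the default Assert policy, emit<OpcodeSize::Wide32> asserts if
// the operands cannot be encoded even at 32 bits.
template<typename BytecodeGenerator>
void exampleEmitToIndexStringWide(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister index)
{
    OpToIndexString::emit<OpcodeSize::Wide32>(gen, dst, index);
}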
struct OpUnreachable : public Instruction {
static constexpr OpcodeID opcodeID = op_unreachable;
static constexpr size_t length = 1;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<OpcodeID, __size>::check(opcodeID)
&& (__size == OpcodeSize::Wide16 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide32));
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**unreachable"[2 - __sizeShiftAmount]);
}
OpUnreachable(const uint8_t* stream)
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpUnreachable(const uint16_t* stream)
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpUnreachable(const uint32_t* stream)
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static OpUnreachable decode(const uint8_t* stream)
{
        // The constructor receives a pointer to the first operand; the opcode byte and any wide prefix have already been skipped.
if (*stream == op_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == op_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
};
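// Usage sketch: op_unreachable carries no operands, so a Narrow encoding always
// fits and the wide fallbacks are never taken. Illustrative only; `gen` is
// assumed to be the active BytecodeGenerator.
template<typename BytecodeGenerator>
void exampleEmitUnreachable(BytecodeGenerator* gen)
{
    // Emits the single opcode byte; actually executing it indicates a codegen bug.
    OpUnreachable::emit(gen);
}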
struct OpCreateRest : public Instruction {
static constexpr OpcodeID opcodeID = op_create_rest;
static constexpr size_t length = 4;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister arraySize, unsigned numParametersToSkip)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, dst, arraySize, numParametersToSkip);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister arraySize, unsigned numParametersToSkip)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, dst, arraySize, numParametersToSkip);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister arraySize, unsigned numParametersToSkip)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, dst, arraySize, numParametersToSkip))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, dst, arraySize, numParametersToSkip))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, dst, arraySize, numParametersToSkip);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& dst, VirtualRegister& arraySize, unsigned& numParametersToSkip)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<OpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(dst)
&& Fits<VirtualRegister, __size>::check(arraySize)
&& Fits<unsigned, __size>::check(numParametersToSkip)
&& (__size == OpcodeSize::Wide16 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister arraySize, unsigned numParametersToSkip)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, dst, arraySize, numParametersToSkip)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide32));
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(dst));
gen->write(Fits<VirtualRegister, __size>::convert(arraySize));
gen->write(Fits<unsigned, __size>::convert(numParametersToSkip));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**create_rest"[2 - __sizeShiftAmount]);
dumper->dumpOperand("dst", m_dst, true);
dumper->dumpOperand("arraySize", m_arraySize, false);
dumper->dumpOperand("numParametersToSkip", m_numParametersToSkip, false);
}
OpCreateRest(const uint8_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_arraySize(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[1]))
, m_numParametersToSkip(Fits<unsigned, OpcodeSize::Narrow>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpCreateRest(const uint16_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_arraySize(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[1]))
, m_numParametersToSkip(Fits<unsigned, OpcodeSize::Wide16>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpCreateRest(const uint32_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_arraySize(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[1]))
, m_numParametersToSkip(Fits<unsigned, OpcodeSize::Wide32>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static OpCreateRest decode(const uint8_t* stream)
{
        // The constructor receives a pointer to the first operand; the opcode byte and any wide prefix have already been skipped.
if (*stream == op_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == op_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setDst<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setDst<OpcodeSize::Wide16>(value, func);
else
setDst<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setArraySize(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setArraySize<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setArraySize<OpcodeSize::Wide16>(value, func);
else
setArraySize<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setArraySize(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setNumParametersToSkip(unsigned value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setNumParametersToSkip<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setNumParametersToSkip<OpcodeSize::Wide16>(value, func);
else
setNumParametersToSkip<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setNumParametersToSkip(unsigned value, Functor func)
{
if (!Fits<unsigned, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 2 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<unsigned, size>::convert(value);
}
VirtualRegister m_dst;
VirtualRegister m_arraySize;
unsigned m_numParametersToSkip;
};
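// Usage sketch: op_create_rest materializes the rest-parameter array. Note that
// numParametersToSkip is an unsigned immediate, so Fits<unsigned, size> rather
// than Fits<VirtualRegister, size> governs its encoding. Names are illustrative.
template<typename BytecodeGenerator>
void exampleEmitCreateRest(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister arraySize)
{
    // Skip the two declared parameters preceding the rest parameter.
    OpCreateRest::emit(gen, dst, arraySize, 2u);
}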
struct OpGetRestLength : public Instruction {
static constexpr OpcodeID opcodeID = op_get_rest_length;
static constexpr size_t length = 3;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister dst, unsigned numParametersToSkip)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, dst, numParametersToSkip);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, unsigned numParametersToSkip)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, dst, numParametersToSkip);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister dst, unsigned numParametersToSkip)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, dst, numParametersToSkip))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, dst, numParametersToSkip))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, dst, numParametersToSkip);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& dst, unsigned& numParametersToSkip)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<OpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(dst)
&& Fits<unsigned, __size>::check(numParametersToSkip)
&& (__size == OpcodeSize::Wide16 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister dst, unsigned numParametersToSkip)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, dst, numParametersToSkip)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide32));
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(dst));
gen->write(Fits<unsigned, __size>::convert(numParametersToSkip));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**get_rest_length"[2 - __sizeShiftAmount]);
dumper->dumpOperand("dst", m_dst, true);
dumper->dumpOperand("numParametersToSkip", m_numParametersToSkip, false);
}
OpGetRestLength(const uint8_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_numParametersToSkip(Fits<unsigned, OpcodeSize::Narrow>::convert(stream[1]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpGetRestLength(const uint16_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_numParametersToSkip(Fits<unsigned, OpcodeSize::Wide16>::convert(stream[1]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpGetRestLength(const uint32_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_numParametersToSkip(Fits<unsigned, OpcodeSize::Wide32>::convert(stream[1]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static OpGetRestLength decode(const uint8_t* stream)
{
        // The constructor receives a pointer to the first operand; the opcode byte and any wide prefix have already been skipped.
if (*stream == op_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == op_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setDst<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setDst<OpcodeSize::Wide16>(value, func);
else
setDst<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setNumParametersToSkip(unsigned value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setNumParametersToSkip<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setNumParametersToSkip<OpcodeSize::Wide16>(value, func);
else
setNumParametersToSkip<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setNumParametersToSkip(unsigned value, Functor func)
{
if (!Fits<unsigned, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<unsigned, size>::convert(value);
}
VirtualRegister m_dst;
unsigned m_numParametersToSkip;
};
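// Usage sketch: op_get_rest_length computes the rest-parameter count without
// materializing the array, pairing with op_create_rest above. Illustrative only.
template<typename BytecodeGenerator>
void exampleEmitGetRestLength(BytecodeGenerator* gen, VirtualRegister dst, unsigned numParametersToSkip)
{
    OpGetRestLength::emit(gen, dst, numParametersToSkip);
}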
struct OpYield : public Instruction {
static constexpr OpcodeID opcodeID = op_yield;
static constexpr size_t length = 4;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister generator, unsigned yieldPoint, VirtualRegister argument)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, generator, yieldPoint, argument);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister generator, unsigned yieldPoint, VirtualRegister argument)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, generator, yieldPoint, argument);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister generator, unsigned yieldPoint, VirtualRegister argument)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, generator, yieldPoint, argument))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, generator, yieldPoint, argument))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, generator, yieldPoint, argument);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& generator, unsigned& yieldPoint, VirtualRegister& argument)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<OpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(generator)
&& Fits<unsigned, __size>::check(yieldPoint)
&& Fits<VirtualRegister, __size>::check(argument)
&& (__size == OpcodeSize::Wide16 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister generator, unsigned yieldPoint, VirtualRegister argument)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, generator, yieldPoint, argument)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide32));
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(generator));
gen->write(Fits<unsigned, __size>::convert(yieldPoint));
gen->write(Fits<VirtualRegister, __size>::convert(argument));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**yield"[2 - __sizeShiftAmount]);
dumper->dumpOperand("generator", m_generator, true);
dumper->dumpOperand("yieldPoint", m_yieldPoint, false);
dumper->dumpOperand("argument", m_argument, false);
}
OpYield(const uint8_t* stream)
: m_generator(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_yieldPoint(Fits<unsigned, OpcodeSize::Narrow>::convert(stream[1]))
, m_argument(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpYield(const uint16_t* stream)
: m_generator(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_yieldPoint(Fits<unsigned, OpcodeSize::Wide16>::convert(stream[1]))
, m_argument(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpYield(const uint32_t* stream)
: m_generator(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_yieldPoint(Fits<unsigned, OpcodeSize::Wide32>::convert(stream[1]))
, m_argument(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static OpYield decode(const uint8_t* stream)
{
        // The constructor receives a pointer to the first operand; the opcode byte and any wide prefix have already been skipped.
if (*stream == op_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == op_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setGenerator(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setGenerator<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setGenerator<OpcodeSize::Wide16>(value, func);
else
setGenerator<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setGenerator(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setYieldPoint(unsigned value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setYieldPoint<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setYieldPoint<OpcodeSize::Wide16>(value, func);
else
setYieldPoint<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setYieldPoint(unsigned value, Functor func)
{
if (!Fits<unsigned, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<unsigned, size>::convert(value);
}
template<typename Functor>
void setArgument(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setArgument<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setArgument<OpcodeSize::Wide16>(value, func);
else
setArgument<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setArgument(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 2 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
VirtualRegister m_generator;
unsigned m_yieldPoint;
VirtualRegister m_argument;
};
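// Usage sketch: op_yield suspends a generator at the numbered yield point with
// `argument` as the yielded value. The yieldPoint immediate is assumed to be
// allocated by the surrounding generatorification pass; names are illustrative.
template<typename BytecodeGenerator>
void exampleEmitYield(BytecodeGenerator* gen, VirtualRegister generator, unsigned yieldPoint, VirtualRegister argument)
{
    OpYield::emit(gen, generator, yieldPoint, argument);
}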
struct OpCheckTraps : public Instruction {
static constexpr OpcodeID opcodeID = op_check_traps;
static constexpr size_t length = 1;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<OpcodeID, __size>::check(opcodeID)
&& (__size == OpcodeSize::Wide16 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide32));
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**check_traps"[2 - __sizeShiftAmount]);
}
OpCheckTraps(const uint8_t* stream)
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpCheckTraps(const uint16_t* stream)
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpCheckTraps(const uint32_t* stream)
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static OpCheckTraps decode(const uint8_t* stream)
{
        // The constructor receives a pointer to the first operand; the opcode byte and any wide prefix have already been skipped.
if (*stream == op_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == op_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
};
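// Usage sketch: op_check_traps polls for pending VM traps (e.g. termination or
// debugger requests) and, like op_unreachable, takes no operands. Illustrative.
template<typename BytecodeGenerator>
void exampleEmitCheckTraps(BytecodeGenerator* gen)
{
    // Length 1: only the opcode byte is written for the Narrow encoding.
    OpCheckTraps::emit(gen);
}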
struct OpLogShadowChickenPrologue : public Instruction {
static constexpr OpcodeID opcodeID = op_log_shadow_chicken_prologue;
static constexpr size_t length = 2;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister scope)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, scope);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister scope)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, scope);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister scope)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, scope))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, scope))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, scope);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& scope)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<OpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(scope)
&& (__size == OpcodeSize::Wide16 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister scope)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, scope)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide32));
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(scope));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**log_shadow_chicken_prologue"[2 - __sizeShiftAmount]);
dumper->dumpOperand("scope", m_scope, true);
}
OpLogShadowChickenPrologue(const uint8_t* stream)
: m_scope(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpLogShadowChickenPrologue(const uint16_t* stream)
: m_scope(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpLogShadowChickenPrologue(const uint32_t* stream)
: m_scope(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static OpLogShadowChickenPrologue decode(const uint8_t* stream)
{
        // The constructor receives a pointer to the first operand; the opcode byte and any wide prefix have already been skipped.
if (*stream == op_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == op_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setScope(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setScope<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setScope<OpcodeSize::Wide16>(value, func);
else
setScope<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setScope(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
VirtualRegister m_scope;
};
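// Usage sketch: the shadow-chicken opcodes feed the ShadowChicken shadow stack
// that the debugger consults for tail-deleted frames. The prologue form logs
// only the scope register. Illustrative; `gen` and `scope` are assumed.
template<typename BytecodeGenerator>
void exampleEmitShadowChickenPrologue(BytecodeGenerator* gen, VirtualRegister scope)
{
    OpLogShadowChickenPrologue::emit(gen, scope);
}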
struct OpLogShadowChickenTail : public Instruction {
static constexpr OpcodeID opcodeID = op_log_shadow_chicken_tail;
static constexpr size_t length = 3;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister thisValue, VirtualRegister scope)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, thisValue, scope);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister thisValue, VirtualRegister scope)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, thisValue, scope);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister thisValue, VirtualRegister scope)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, thisValue, scope))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, thisValue, scope))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, thisValue, scope);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& thisValue, VirtualRegister& scope)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<OpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(thisValue)
&& Fits<VirtualRegister, __size>::check(scope)
&& (__size == OpcodeSize::Wide16 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister thisValue, VirtualRegister scope)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, thisValue, scope)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide32));
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(thisValue));
gen->write(Fits<VirtualRegister, __size>::convert(scope));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**log_shadow_chicken_tail"[2 - __sizeShiftAmount]);
dumper->dumpOperand("thisValue", m_thisValue, true);
dumper->dumpOperand("scope", m_scope, false);
}
OpLogShadowChickenTail(const uint8_t* stream)
: m_thisValue(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_scope(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[1]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpLogShadowChickenTail(const uint16_t* stream)
: m_thisValue(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_scope(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[1]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpLogShadowChickenTail(const uint32_t* stream)
: m_thisValue(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_scope(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[1]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static OpLogShadowChickenTail decode(const uint8_t* stream)
{
        // The constructor receives a pointer to the first operand; the opcode byte and any wide prefix have already been skipped.
if (*stream == op_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == op_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setThisValue(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setThisValue<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setThisValue<OpcodeSize::Wide16>(value, func);
else
setThisValue<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setThisValue(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setScope(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setScope<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setScope<OpcodeSize::Wide16>(value, func);
else
setScope<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setScope(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
VirtualRegister m_thisValue;
VirtualRegister m_scope;
};
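// Every generated op struct follows the same anatomy: public emit() entry
// points that try Narrow, then Wide16, then Wide32 encodings; a checkImpl()
// that verifies each operand passes the Fits<> check for the candidate size;
// an emitImpl() that writes an optional one-byte wide prefix, the opcode
// byte, and the operands; constructors that rehydrate the struct from a
// narrow/wide operand stream; and per-operand setters. A minimal emit
// sketch, assuming a live BytecodeGenerator `gen` and already-allocated
// registers (illustrative only, not part of the generated interface):
//
//     OpLogShadowChickenTail::emit(&gen, thisValueReg, scopeReg);
//
// picks the smallest encoding whose operands all fit.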
struct OpResolveScopeForHoistingFuncDeclInEval : public Instruction {
static constexpr OpcodeID opcodeID = op_resolve_scope_for_hoisting_func_decl_in_eval;
static constexpr size_t length = 4;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister scope, unsigned property)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, dst, scope, property);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister scope, unsigned property)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, dst, scope, property);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister scope, unsigned property)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, dst, scope, property))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, dst, scope, property))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, dst, scope, property);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& dst, VirtualRegister& scope, unsigned& property)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<OpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(dst)
&& Fits<VirtualRegister, __size>::check(scope)
&& Fits<unsigned, __size>::check(property)
&& (__size == OpcodeSize::Wide16 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister scope, unsigned property)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, dst, scope, property)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide32));
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(dst));
gen->write(Fits<VirtualRegister, __size>::convert(scope));
gen->write(Fits<unsigned, __size>::convert(property));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**resolve_scope_for_hoisting_func_decl_in_eval"[2 - __sizeShiftAmount]);
dumper->dumpOperand("dst", m_dst, true);
dumper->dumpOperand("scope", m_scope, false);
dumper->dumpOperand("property", m_property, false);
}
OpResolveScopeForHoistingFuncDeclInEval(const uint8_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_scope(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[1]))
, m_property(Fits<unsigned, OpcodeSize::Narrow>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpResolveScopeForHoistingFuncDeclInEval(const uint16_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_scope(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[1]))
, m_property(Fits<unsigned, OpcodeSize::Wide16>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpResolveScopeForHoistingFuncDeclInEval(const uint32_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_scope(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[1]))
, m_property(Fits<unsigned, OpcodeSize::Wide32>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static OpResolveScopeForHoistingFuncDeclInEval decode(const uint8_t* stream)
{
        // The constructor takes a pointer to the first operand (the opcode and any wide prefix are not included).
if (*stream == op_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == op_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setDst<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setDst<OpcodeSize::Wide16>(value, func);
else
setDst<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setScope(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setScope<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setScope<OpcodeSize::Wide16>(value, func);
else
setScope<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setScope(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setProperty(unsigned value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setProperty<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setProperty<OpcodeSize::Wide16>(value, func);
else
setProperty<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setProperty(unsigned value, Functor func)
{
if (!Fits<unsigned, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 2 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<unsigned, size>::convert(value);
}
VirtualRegister m_dst;
VirtualRegister m_scope;
unsigned m_property;
};
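// decode() is the inverse of emit(): given a pointer at the first byte of an
// instruction (the wide prefix when present, otherwise the opcode itself), it
// dispatches on that byte and reinterprets the operand words at the matching
// width. A hedged usage sketch, assuming `pc` points at such a byte:
//
//     auto op = OpResolveScopeForHoistingFuncDeclInEval::decode(pc);
//     VirtualRegister dst = op.m_dst;
//
// The constructors assert stream[-1] == opcodeID, which is why decode()
// hands them a pointer just past the opcode byte.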
struct OpPutInternalField : public Instruction {
static constexpr OpcodeID opcodeID = op_put_internal_field;
static constexpr size_t length = 4;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister base, unsigned index, VirtualRegister value)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, base, index, value);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister base, unsigned index, VirtualRegister value)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, base, index, value);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister base, unsigned index, VirtualRegister value)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, base, index, value))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, base, index, value))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, base, index, value);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& base, unsigned& index, VirtualRegister& value)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<OpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(base)
&& Fits<unsigned, __size>::check(index)
&& Fits<VirtualRegister, __size>::check(value)
&& (__size == OpcodeSize::Wide16 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister base, unsigned index, VirtualRegister value)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, base, index, value)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide32));
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(base));
gen->write(Fits<unsigned, __size>::convert(index));
gen->write(Fits<VirtualRegister, __size>::convert(value));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**put_internal_field"[2 - __sizeShiftAmount]);
dumper->dumpOperand("base", m_base, true);
dumper->dumpOperand("index", m_index, false);
dumper->dumpOperand("value", m_value, false);
}
OpPutInternalField(const uint8_t* stream)
: m_base(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_index(Fits<unsigned, OpcodeSize::Narrow>::convert(stream[1]))
, m_value(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpPutInternalField(const uint16_t* stream)
: m_base(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_index(Fits<unsigned, OpcodeSize::Wide16>::convert(stream[1]))
, m_value(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpPutInternalField(const uint32_t* stream)
: m_base(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_index(Fits<unsigned, OpcodeSize::Wide32>::convert(stream[1]))
, m_value(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static OpPutInternalField decode(const uint8_t* stream)
{
        // The constructor takes a pointer to the first operand (the opcode and any wide prefix are not included).
if (*stream == op_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == op_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setBase(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setBase<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setBase<OpcodeSize::Wide16>(value, func);
else
setBase<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setBase(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setIndex(unsigned value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setIndex<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setIndex<OpcodeSize::Wide16>(value, func);
else
setIndex<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setIndex(unsigned value, Functor func)
{
if (!Fits<unsigned, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<unsigned, size>::convert(value);
}
template<typename Functor>
void setValue(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setValue<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setValue<OpcodeSize::Wide16>(value, func);
else
setValue<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setValue(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 2 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
VirtualRegister m_base;
unsigned m_index;
VirtualRegister m_value;
};
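// The setters above locate operand i at
// `i * size + PaddingBySize<size>::value + 1` bytes past `this`: operands
// are `size` bytes wide, PaddingBySize is assumed here to be the width of
// the wide prefix (0 for Narrow, 1 for Wide16/Wide32, matching the single
// prefix byte emitImpl() writes), and the trailing +1 skips the opcode
// byte. Worked example for OpPutInternalField at Wide32: `value` is operand
// index 2, so it sits at 2 * 4 + 1 + 1 = 10 bytes past the prefix byte.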
struct OpNop : public Instruction {
static constexpr OpcodeID opcodeID = op_nop;
static constexpr size_t length = 1;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<OpcodeID, __size>::check(opcodeID)
&& (__size == OpcodeSize::Wide16 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide32));
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**nop"[2 - __sizeShiftAmount]);
}
OpNop(const uint8_t* stream)
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpNop(const uint16_t* stream)
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpNop(const uint32_t* stream)
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static OpNop decode(const uint8_t* stream)
{
        // The constructor takes a pointer to the first operand (the opcode and any wide prefix are not included).
if (*stream == op_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == op_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
};
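// The dump() bodies index into a "**"-prefixed name literal:
// &"**nop"[2 - __sizeShiftAmount] yields "nop", "*nop", or "**nop" for
// __sizeShiftAmount 0, 1, or 2 (Narrow, Wide16, Wide32), so widened
// instructions are marked with leading asterisks without building a string
// at runtime.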
struct OpSuperSamplerBegin : public Instruction {
static constexpr OpcodeID opcodeID = op_super_sampler_begin;
static constexpr size_t length = 1;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<OpcodeID, __size>::check(opcodeID)
&& (__size == OpcodeSize::Wide16 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide32));
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**super_sampler_begin"[2 - __sizeShiftAmount]);
}
OpSuperSamplerBegin(const uint8_t* stream)
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpSuperSamplerBegin(const uint16_t* stream)
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpSuperSamplerBegin(const uint32_t* stream)
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static OpSuperSamplerBegin decode(const uint8_t* stream)
{
        // The constructor takes a pointer to the first operand (the opcode and any wide prefix are not included).
if (*stream == op_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == op_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
};
struct OpWide16 : public Instruction {
static constexpr OpcodeID opcodeID = op_wide16;
static constexpr size_t length = 1;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<OpcodeID, __size>::check(opcodeID)
&& (__size == OpcodeSize::Wide16 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide32));
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**wide16"[2 - __sizeShiftAmount]);
}
OpWide16(const uint8_t* stream)
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpWide16(const uint16_t* stream)
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpWide16(const uint32_t* stream)
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static OpWide16 decode(const uint8_t* stream)
{
        // The constructor takes a pointer to the first operand (the opcode and any wide prefix are not included).
if (*stream == op_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == op_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
};
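// op_wide16 and op_wide32 double as one-byte escape prefixes: after
// aligning the stream, emitImpl() writes them as Narrow opcodes immediately
// before the real opcode, giving the layout
//
//     [wide prefix (optional)][opcode][operand 0][operand 1]...
//
// with each operand 1, 2, or 4 bytes depending on the prefix. That is why
// decode() can dispatch on the first byte, and why the prefixes also exist
// as standalone structs here.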
struct OpSuperSamplerEnd : public Instruction {
static constexpr OpcodeID opcodeID = op_super_sampler_end;
static constexpr size_t length = 1;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<OpcodeID, __size>::check(opcodeID)
&& (__size == OpcodeSize::Wide16 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide32));
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**super_sampler_end"[2 - __sizeShiftAmount]);
}
OpSuperSamplerEnd(const uint8_t* stream)
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpSuperSamplerEnd(const uint16_t* stream)
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpSuperSamplerEnd(const uint32_t* stream)
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static OpSuperSamplerEnd decode(const uint8_t* stream)
{
        // The constructor takes a pointer to the first operand (the opcode and any wide prefix are not included).
if (*stream == op_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == op_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
};
struct OpWide32 : public Instruction {
static constexpr OpcodeID opcodeID = op_wide32;
static constexpr size_t length = 1;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<OpcodeID, __size>::check(opcodeID)
&& (__size == OpcodeSize::Wide16 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide32));
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**wide32"[2 - __sizeShiftAmount]);
}
OpWide32(const uint8_t* stream)
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpWide32(const uint16_t* stream)
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpWide32(const uint32_t* stream)
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static OpWide32 decode(const uint8_t* stream)
{
        // The constructor takes a pointer to the first operand (the opcode and any wide prefix are not included).
if (*stream == op_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == op_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
};
struct OpEnter : public Instruction {
static constexpr OpcodeID opcodeID = op_enter;
static constexpr size_t length = 1;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<OpcodeID, __size>::check(opcodeID)
&& (__size == OpcodeSize::Wide16 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide32));
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**enter"[2 - __sizeShiftAmount]);
}
OpEnter(const uint8_t* stream)
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpEnter(const uint16_t* stream)
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpEnter(const uint32_t* stream)
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static OpEnter decode(const uint8_t* stream)
{
        // The constructor takes a pointer to the first operand (the opcode and any wide prefix are not included).
if (*stream == op_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == op_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
};
struct OpGetScope : public Instruction {
static constexpr OpcodeID opcodeID = op_get_scope;
static constexpr size_t length = 2;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister dst)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, dst);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, dst);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister dst)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, dst))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, dst))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, dst);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& dst)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<OpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(dst)
&& (__size == OpcodeSize::Wide16 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister dst)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, dst)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide32));
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(dst));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**get_scope"[2 - __sizeShiftAmount]);
dumper->dumpOperand("dst", m_dst, true);
}
OpGetScope(const uint8_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpGetScope(const uint16_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpGetScope(const uint32_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static OpGetScope decode(const uint8_t* stream)
{
        // The constructor takes a pointer to the first operand (the opcode and any wide prefix are not included).
if (*stream == op_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == op_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setDst<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setDst<OpcodeSize::Wide16>(value, func);
else
setDst<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
VirtualRegister m_dst;
};
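// Callers that need a specific encoding can bypass the fallback chain via
// the sized emit() overload, which reports whether the operands fit rather
// than asserting when NoAssert is passed. A hedged sketch, assuming `gen`
// and `dst` as above:
//
//     bool fit = OpGetScope::emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert>(&gen, dst);
//
// This mirrors how emitWithSmallestSizeRequirement() probes Narrow and
// Wide16 before committing to Wide32.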
struct OpCreateDirectArguments : public Instruction {
static constexpr OpcodeID opcodeID = op_create_direct_arguments;
static constexpr size_t length = 2;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister dst)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, dst);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, dst);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister dst)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, dst))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, dst))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, dst);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& dst)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<OpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(dst)
&& (__size == OpcodeSize::Wide16 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister dst)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, dst)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide32));
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(dst));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**create_direct_arguments"[2 - __sizeShiftAmount]);
dumper->dumpOperand("dst", m_dst, true);
}
OpCreateDirectArguments(const uint8_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpCreateDirectArguments(const uint16_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpCreateDirectArguments(const uint32_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static OpCreateDirectArguments decode(const uint8_t* stream)
{
        // The constructor takes a pointer to the first operand (the opcode and any wide prefix are not included).
if (*stream == op_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == op_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setDst<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setDst<OpcodeSize::Wide16>(value, func);
else
setDst<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
VirtualRegister m_dst;
};
struct OpCreateScopedArguments : public Instruction {
static constexpr OpcodeID opcodeID = op_create_scoped_arguments;
static constexpr size_t length = 3;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister scope)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, dst, scope);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister scope)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, dst, scope);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister scope)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, dst, scope))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, dst, scope))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, dst, scope);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& dst, VirtualRegister& scope)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<OpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(dst)
&& Fits<VirtualRegister, __size>::check(scope)
&& (__size == OpcodeSize::Wide16 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister scope)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, dst, scope)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide32));
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(dst));
gen->write(Fits<VirtualRegister, __size>::convert(scope));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**create_scoped_arguments"[2 - __sizeShiftAmount]);
dumper->dumpOperand("dst", m_dst, true);
dumper->dumpOperand("scope", m_scope, false);
}
OpCreateScopedArguments(const uint8_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_scope(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[1]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpCreateScopedArguments(const uint16_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_scope(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[1]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpCreateScopedArguments(const uint32_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_scope(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[1]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static OpCreateScopedArguments decode(const uint8_t* stream)
{
        // The constructor takes a pointer to the first operand (the opcode and any wide prefix are not included).
if (*stream == op_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == op_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setDst<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setDst<OpcodeSize::Wide16>(value, func);
else
setDst<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setScope(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setScope<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setScope<OpcodeSize::Wide16>(value, func);
else
setScope<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setScope(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
VirtualRegister m_dst;
VirtualRegister m_scope;
};
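// The two-argument setters take a functor as an escape hatch: if the new
// value does not fit the instruction's current encoding, the functor must
// produce a replacement that does (for example, by moving the value through
// a low-numbered register). A hedged sketch, assuming `op` was decoded from
// a narrow stream; `relocateToNarrowRegister` is a hypothetical helper:
//
//     op.setScope(newScope, [&]() -> VirtualRegister {
//         return relocateToNarrowRegister(newScope); // hypothetical
//     });
//
// The functor only runs on the does-not-fit path, so the common case is a
// single in-place store.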
struct OpCreateClonedArguments : public Instruction {
static constexpr OpcodeID opcodeID = op_create_cloned_arguments;
static constexpr size_t length = 2;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister dst)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, dst);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, dst);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister dst)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, dst))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, dst))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, dst);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& dst)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<OpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(dst)
&& (__size == OpcodeSize::Wide16 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister dst)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, dst)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide32));
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(dst));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**create_cloned_arguments"[2 - __sizeShiftAmount]);
dumper->dumpOperand("dst", m_dst, true);
}
OpCreateClonedArguments(const uint8_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpCreateClonedArguments(const uint16_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpCreateClonedArguments(const uint32_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static OpCreateClonedArguments decode(const uint8_t* stream)
{
        // The constructor takes a pointer to the first operand (the opcode and any wide prefix are not included).
if (*stream == op_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == op_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setDst<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setDst<OpcodeSize::Wide16>(value, func);
else
setDst<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
VirtualRegister m_dst;
};
struct OpCreateArgumentsButterfly : public Instruction {
static constexpr OpcodeID opcodeID = op_create_arguments_butterfly;
static constexpr size_t length = 2;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister dst)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, dst);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, dst);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister dst)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, dst))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, dst))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, dst);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& dst)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<OpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(dst)
&& (__size == OpcodeSize::Wide16 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister dst)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, dst)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide32));
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(dst));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**create_arguments_butterfly"[2 - __sizeShiftAmount]);
dumper->dumpOperand("dst", m_dst, true);
}
OpCreateArgumentsButterfly(const uint8_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpCreateArgumentsButterfly(const uint16_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpCreateArgumentsButterfly(const uint32_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static OpCreateArgumentsButterfly decode(const uint8_t* stream)
{
        // The constructor takes a pointer to the first operand (the opcode and any wide prefix are not included).
if (*stream == op_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == op_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setDst<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setDst<OpcodeSize::Wide16>(value, func);
else
setDst<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
VirtualRegister m_dst;
};
struct OpNewPromise : public Instruction {
static constexpr OpcodeID opcodeID = op_new_promise;
static constexpr size_t length = 3;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister dst, bool isInternalPromise)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, dst, isInternalPromise);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, bool isInternalPromise)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, dst, isInternalPromise);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister dst, bool isInternalPromise)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, dst, isInternalPromise))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, dst, isInternalPromise))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, dst, isInternalPromise);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& dst, bool& isInternalPromise)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<OpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(dst)
&& Fits<bool, __size>::check(isInternalPromise)
&& (__size == OpcodeSize::Wide16 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister dst, bool isInternalPromise)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, dst, isInternalPromise)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide32));
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(dst));
gen->write(Fits<bool, __size>::convert(isInternalPromise));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**new_promise"[2 - __sizeShiftAmount]);
dumper->dumpOperand("dst", m_dst, true);
dumper->dumpOperand("isInternalPromise", m_isInternalPromise, false);
}
OpNewPromise(const uint8_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_isInternalPromise(Fits<bool, OpcodeSize::Narrow>::convert(stream[1]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpNewPromise(const uint16_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_isInternalPromise(Fits<bool, OpcodeSize::Wide16>::convert(stream[1]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpNewPromise(const uint32_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_isInternalPromise(Fits<bool, OpcodeSize::Wide32>::convert(stream[1]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static OpNewPromise decode(const uint8_t* stream)
{
        // The incoming pointer addresses the opcode byte (or its wide prefix); skip past them so the constructor receives a pointer to the first operand.
if (*stream == op_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == op_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setDst<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setDst<OpcodeSize::Wide16>(value, func);
else
setDst<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setIsInternalPromise(bool value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setIsInternalPromise<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setIsInternalPromise<OpcodeSize::Wide16>(value, func);
else
setIsInternalPromise<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setIsInternalPromise(bool value, Functor func)
{
if (!Fits<bool, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<bool, size>::convert(value);
}
VirtualRegister m_dst;
bool m_isInternalPromise;
};
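// Emission sketch (illustrative; `gen` is a hypothetical BytecodeGenerator):
// the public emit() starts from the narrow encoding and widens only when an
// operand fails its Fits<> check, so a typical call site is simply
//
//     OpNewPromise::emit(&gen, dst, /* isInternalPromise */ false);
//
// Callers can also pin a minimum encoding explicitly, e.g.
// OpNewPromise::emit<OpcodeSize::Wide32>(&gen, dst, false).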
struct OpNewGenerator : public Instruction {
static constexpr OpcodeID opcodeID = op_new_generator;
static constexpr size_t length = 2;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister dst)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, dst);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, dst);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister dst)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, dst))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, dst))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, dst);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& dst)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<OpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(dst)
&& (__size == OpcodeSize::Wide16 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister dst)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, dst)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide32));
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(dst));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**new_generator"[2 - __sizeShiftAmount]);
dumper->dumpOperand("dst", m_dst, true);
}
OpNewGenerator(const uint8_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpNewGenerator(const uint16_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpNewGenerator(const uint32_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static OpNewGenerator decode(const uint8_t* stream)
{
        // The incoming pointer addresses the opcode byte (or its wide prefix); skip past them so the constructor receives a pointer to the first operand.
if (*stream == op_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == op_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setDst<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setDst<OpcodeSize::Wide16>(value, func);
else
setDst<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
VirtualRegister m_dst;
};
struct OpArgumentCount : public Instruction {
static constexpr OpcodeID opcodeID = op_argument_count;
static constexpr size_t length = 2;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister dst)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, dst);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, dst);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister dst)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, dst))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, dst))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, dst);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& dst)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<OpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(dst)
&& (__size == OpcodeSize::Wide16 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister dst)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, dst)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide32));
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(dst));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**argument_count"[2 - __sizeShiftAmount]);
dumper->dumpOperand("dst", m_dst, true);
}
OpArgumentCount(const uint8_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpArgumentCount(const uint16_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpArgumentCount(const uint32_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static OpArgumentCount decode(const uint8_t* stream)
{
        // The incoming pointer addresses the opcode byte (or its wide prefix); skip past them so the constructor receives a pointer to the first operand.
if (*stream == op_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == op_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setDst<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setDst<OpcodeSize::Wide16>(value, func);
else
setDst<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
VirtualRegister m_dst;
};
struct OpCheckTdz : public Instruction {
static constexpr OpcodeID opcodeID = op_check_tdz;
static constexpr size_t length = 2;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister targetVirtualRegister)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, targetVirtualRegister);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister targetVirtualRegister)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, targetVirtualRegister);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister targetVirtualRegister)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, targetVirtualRegister))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, targetVirtualRegister))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, targetVirtualRegister);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& targetVirtualRegister)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<OpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(targetVirtualRegister)
&& (__size == OpcodeSize::Wide16 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister targetVirtualRegister)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, targetVirtualRegister)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide32));
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(targetVirtualRegister));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**check_tdz"[2 - __sizeShiftAmount]);
dumper->dumpOperand("targetVirtualRegister", m_targetVirtualRegister, true);
}
OpCheckTdz(const uint8_t* stream)
: m_targetVirtualRegister(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpCheckTdz(const uint16_t* stream)
: m_targetVirtualRegister(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpCheckTdz(const uint32_t* stream)
: m_targetVirtualRegister(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static OpCheckTdz decode(const uint8_t* stream)
{
        // The incoming pointer addresses the opcode byte (or its wide prefix); skip past them so the constructor receives a pointer to the first operand.
if (*stream == op_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == op_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setTargetVirtualRegister(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setTargetVirtualRegister<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setTargetVirtualRegister<OpcodeSize::Wide16>(value, func);
else
setTargetVirtualRegister<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setTargetVirtualRegister(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
VirtualRegister m_targetVirtualRegister;
};
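// Semantics note (summarized from JSC behavior, not stated in this file):
// op_check_tdz presumably throws a ReferenceError when targetVirtualRegister
// still holds the empty TDZ sentinel, i.e. when a let/const binding is read
// before initialization, as in
//
//     let x = x; // the right-hand-side read of `x` trips the check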
struct OpNewArrayWithSpread : public Instruction {
static constexpr OpcodeID opcodeID = op_new_array_with_spread;
static constexpr size_t length = 5;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister argv, unsigned argc, unsigned bitVector)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, dst, argv, argc, bitVector);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister argv, unsigned argc, unsigned bitVector)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, dst, argv, argc, bitVector);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister argv, unsigned argc, unsigned bitVector)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, dst, argv, argc, bitVector))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, dst, argv, argc, bitVector))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, dst, argv, argc, bitVector);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& dst, VirtualRegister& argv, unsigned& argc, unsigned& bitVector)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<OpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(dst)
&& Fits<VirtualRegister, __size>::check(argv)
&& Fits<unsigned, __size>::check(argc)
&& Fits<unsigned, __size>::check(bitVector)
&& (__size == OpcodeSize::Wide16 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister argv, unsigned argc, unsigned bitVector)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, dst, argv, argc, bitVector)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide32));
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(dst));
gen->write(Fits<VirtualRegister, __size>::convert(argv));
gen->write(Fits<unsigned, __size>::convert(argc));
gen->write(Fits<unsigned, __size>::convert(bitVector));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**new_array_with_spread"[2 - __sizeShiftAmount]);
dumper->dumpOperand("dst", m_dst, true);
dumper->dumpOperand("argv", m_argv, false);
dumper->dumpOperand("argc", m_argc, false);
dumper->dumpOperand("bitVector", m_bitVector, false);
}
OpNewArrayWithSpread(const uint8_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_argv(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[1]))
, m_argc(Fits<unsigned, OpcodeSize::Narrow>::convert(stream[2]))
, m_bitVector(Fits<unsigned, OpcodeSize::Narrow>::convert(stream[3]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpNewArrayWithSpread(const uint16_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_argv(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[1]))
, m_argc(Fits<unsigned, OpcodeSize::Wide16>::convert(stream[2]))
, m_bitVector(Fits<unsigned, OpcodeSize::Wide16>::convert(stream[3]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpNewArrayWithSpread(const uint32_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_argv(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[1]))
, m_argc(Fits<unsigned, OpcodeSize::Wide32>::convert(stream[2]))
, m_bitVector(Fits<unsigned, OpcodeSize::Wide32>::convert(stream[3]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static OpNewArrayWithSpread decode(const uint8_t* stream)
{
        // The incoming pointer addresses the opcode byte (or its wide prefix); skip past them so the constructor receives a pointer to the first operand.
if (*stream == op_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == op_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setDst<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setDst<OpcodeSize::Wide16>(value, func);
else
setDst<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setArgv(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setArgv<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setArgv<OpcodeSize::Wide16>(value, func);
else
setArgv<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setArgv(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setArgc(unsigned value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setArgc<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setArgc<OpcodeSize::Wide16>(value, func);
else
setArgc<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setArgc(unsigned value, Functor func)
{
if (!Fits<unsigned, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 2 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<unsigned, size>::convert(value);
}
template<typename Functor>
void setBitVector(unsigned value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setBitVector<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setBitVector<OpcodeSize::Wide16>(value, func);
else
setBitVector<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setBitVector(unsigned value, Functor func)
{
if (!Fits<unsigned, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 3 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<unsigned, size>::convert(value);
}
VirtualRegister m_dst;
VirtualRegister m_argv;
unsigned m_argc;
unsigned m_bitVector;
};
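// Operand note (an assumption about meaning, not stated in this file):
// argv/argc appear to describe a contiguous run of argument registers, and
// bitVector appears to identify a per-code-block bit vector whose set bits
// mark which of the argc entries are spread results to flatten; for
// `[a, ...b, c]` that would be three registers with only the middle bit set.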
struct OpSpread : public Instruction {
static constexpr OpcodeID opcodeID = op_spread;
static constexpr size_t length = 3;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister argument)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, dst, argument);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister argument)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, dst, argument);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister argument)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, dst, argument))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, dst, argument))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, dst, argument);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& dst, VirtualRegister& argument)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<OpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(dst)
&& Fits<VirtualRegister, __size>::check(argument)
&& (__size == OpcodeSize::Wide16 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister argument)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, dst, argument)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide32));
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(dst));
gen->write(Fits<VirtualRegister, __size>::convert(argument));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**spread"[2 - __sizeShiftAmount]);
dumper->dumpOperand("dst", m_dst, true);
dumper->dumpOperand("argument", m_argument, false);
}
OpSpread(const uint8_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_argument(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[1]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpSpread(const uint16_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_argument(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[1]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpSpread(const uint32_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_argument(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[1]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static OpSpread decode(const uint8_t* stream)
{
        // The incoming pointer addresses the opcode byte (or its wide prefix); skip past them so the constructor receives a pointer to the first operand.
if (*stream == op_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == op_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setDst<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setDst<OpcodeSize::Wide16>(value, func);
else
setDst<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setArgument(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setArgument<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setArgument<OpcodeSize::Wide16>(value, func);
else
setArgument<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setArgument(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
VirtualRegister m_dst;
VirtualRegister m_argument;
};
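// Semantics note (an assumption, summarized from JSC rather than this file):
// op_spread presumably iterates `argument` once and materializes the results
// as a fixed-size array that a later op_new_array_with_spread can splat.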
struct OpNewRegexp : public Instruction {
static constexpr OpcodeID opcodeID = op_new_regexp;
static constexpr size_t length = 3;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister regexp)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, dst, regexp);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister regexp)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, dst, regexp);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister regexp)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, dst, regexp))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, dst, regexp))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, dst, regexp);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& dst, VirtualRegister& regexp)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<OpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(dst)
&& Fits<VirtualRegister, __size>::check(regexp)
&& (__size == OpcodeSize::Wide16 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister regexp)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, dst, regexp)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide32));
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(dst));
gen->write(Fits<VirtualRegister, __size>::convert(regexp));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**new_regexp"[2 - __sizeShiftAmount]);
dumper->dumpOperand("dst", m_dst, true);
dumper->dumpOperand("regexp", m_regexp, false);
}
OpNewRegexp(const uint8_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_regexp(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[1]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpNewRegexp(const uint16_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_regexp(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[1]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpNewRegexp(const uint32_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_regexp(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[1]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static OpNewRegexp decode(const uint8_t* stream)
{
        // The incoming pointer addresses the opcode byte (or its wide prefix); skip past them so the constructor receives a pointer to the first operand.
if (*stream == op_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == op_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setDst<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setDst<OpcodeSize::Wide16>(value, func);
else
setDst<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setRegexp(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setRegexp<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setRegexp<OpcodeSize::Wide16>(value, func);
else
setRegexp<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setRegexp(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
VirtualRegister m_dst;
VirtualRegister m_regexp;
};
struct OpMov : public Instruction {
static constexpr OpcodeID opcodeID = op_mov;
static constexpr size_t length = 3;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister src)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, dst, src);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister src)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, dst, src);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister src)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, dst, src))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, dst, src))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, dst, src);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& dst, VirtualRegister& src)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<OpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(dst)
&& Fits<VirtualRegister, __size>::check(src)
&& (__size == OpcodeSize::Wide16 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister src)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, dst, src)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide32));
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(dst));
gen->write(Fits<VirtualRegister, __size>::convert(src));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**mov"[2 - __sizeShiftAmount]);
dumper->dumpOperand("dst", m_dst, true);
dumper->dumpOperand("src", m_src, false);
}
OpMov(const uint8_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_src(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[1]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpMov(const uint16_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_src(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[1]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpMov(const uint32_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_src(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[1]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static OpMov decode(const uint8_t* stream)
{
        // The incoming pointer addresses the opcode byte (or its wide prefix); skip past them so the constructor receives a pointer to the first operand.
if (*stream == op_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == op_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setDst<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setDst<OpcodeSize::Wide16>(value, func);
else
setDst<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setSrc(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setSrc<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setSrc<OpcodeSize::Wide16>(value, func);
else
setSrc<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setSrc(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
VirtualRegister m_dst;
VirtualRegister m_src;
};
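// Setter sketch (illustrative; the lambda and helper are hypothetical): the
// setters invoke the fallback functor only when the new value does not fit
// the instruction's current encoding, letting callers substitute one that does:
//
//     op.setDst(newDst, [&]() -> VirtualRegister {
//         return remapToNarrowRegister(newDst); // hypothetical remapping
//     });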
struct OpEq : public Instruction {
static constexpr OpcodeID opcodeID = op_eq;
static constexpr size_t length = 4;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, dst, lhs, rhs);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, dst, lhs, rhs);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, dst, lhs, rhs))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, dst, lhs, rhs))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, dst, lhs, rhs);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& dst, VirtualRegister& lhs, VirtualRegister& rhs)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<OpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(dst)
&& Fits<VirtualRegister, __size>::check(lhs)
&& Fits<VirtualRegister, __size>::check(rhs)
&& (__size == OpcodeSize::Wide16 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, dst, lhs, rhs)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide32));
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(dst));
gen->write(Fits<VirtualRegister, __size>::convert(lhs));
gen->write(Fits<VirtualRegister, __size>::convert(rhs));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**eq"[2 - __sizeShiftAmount]);
dumper->dumpOperand("dst", m_dst, true);
dumper->dumpOperand("lhs", m_lhs, false);
dumper->dumpOperand("rhs", m_rhs, false);
}
OpEq(const uint8_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_lhs(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[1]))
, m_rhs(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpEq(const uint16_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_lhs(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[1]))
, m_rhs(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpEq(const uint32_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_lhs(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[1]))
, m_rhs(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static OpEq decode(const uint8_t* stream)
{
        // The incoming pointer addresses the opcode byte (or its wide prefix); skip past them so the constructor receives a pointer to the first operand.
if (*stream == op_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == op_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setDst<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setDst<OpcodeSize::Wide16>(value, func);
else
setDst<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setLhs(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setLhs<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setLhs<OpcodeSize::Wide16>(value, func);
else
setLhs<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setLhs(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setRhs(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setRhs<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setRhs<OpcodeSize::Wide16>(value, func);
else
setRhs<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setRhs(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 2 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
VirtualRegister m_dst;
VirtualRegister m_lhs;
VirtualRegister m_rhs;
};
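// Worked layout example (a sketch; the registers are hypothetical): a narrow
// op_eq occupies length == 4 bytes, the opcode byte plus one byte per operand,
// so with dst=r0, lhs=r1, rhs=r2 the stream is
//
//     [op_eq][r0][r1][r2]
//
// while the wide32 form is [op_wide32][op_eq] followed by three 4-byte
// operands (after alignment padding).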
struct OpNeq : public Instruction {
static constexpr OpcodeID opcodeID = op_neq;
static constexpr size_t length = 4;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, dst, lhs, rhs);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, dst, lhs, rhs);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, dst, lhs, rhs))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, dst, lhs, rhs))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, dst, lhs, rhs);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& dst, VirtualRegister& lhs, VirtualRegister& rhs)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<OpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(dst)
&& Fits<VirtualRegister, __size>::check(lhs)
&& Fits<VirtualRegister, __size>::check(rhs)
&& (__size == OpcodeSize::Wide16 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, dst, lhs, rhs)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide32));
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(dst));
gen->write(Fits<VirtualRegister, __size>::convert(lhs));
gen->write(Fits<VirtualRegister, __size>::convert(rhs));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**neq"[2 - __sizeShiftAmount]);
dumper->dumpOperand("dst", m_dst, true);
dumper->dumpOperand("lhs", m_lhs, false);
dumper->dumpOperand("rhs", m_rhs, false);
}
OpNeq(const uint8_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_lhs(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[1]))
, m_rhs(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpNeq(const uint16_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_lhs(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[1]))
, m_rhs(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpNeq(const uint32_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_lhs(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[1]))
, m_rhs(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static OpNeq decode(const uint8_t* stream)
{
        // The incoming pointer addresses the opcode byte (or its wide prefix); skip past them so the constructor receives a pointer to the first operand.
if (*stream == op_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == op_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setDst<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setDst<OpcodeSize::Wide16>(value, func);
else
setDst<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setLhs(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setLhs<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setLhs<OpcodeSize::Wide16>(value, func);
else
setLhs<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setLhs(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setRhs(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setRhs<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setRhs<OpcodeSize::Wide16>(value, func);
else
setRhs<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setRhs(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 2 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
VirtualRegister m_dst;
VirtualRegister m_lhs;
VirtualRegister m_rhs;
};
struct OpStricteq : public Instruction {
static constexpr OpcodeID opcodeID = op_stricteq;
static constexpr size_t length = 4;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, dst, lhs, rhs);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, dst, lhs, rhs);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, dst, lhs, rhs))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, dst, lhs, rhs))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, dst, lhs, rhs);
}
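// Width selection: try the narrowest encoding allowed by the requested
// minimum, widening on failure -- Narrow, then Wide16, then Wide32. The final
// Wide32 attempt asserts, since every operand is expected to fit 32 bits.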
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& dst, VirtualRegister& lhs, VirtualRegister& rhs)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<OpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(dst)
&& Fits<VirtualRegister, __size>::check(lhs)
&& Fits<VirtualRegister, __size>::check(rhs)
&& (__size == OpcodeSize::Wide16 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide32) : true);
}
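// checkImpl() decides whether the instruction is encodable at __size: the
// opcode and all three operands must fit the width, and for the wide forms
// the op_wide16/op_wide32 prefix must itself fit a narrow slot. The early
// return above simply rules out wide16 on the Windows CLoop configuration.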
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, dst, lhs, rhs)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide32));
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(dst));
gen->write(Fits<VirtualRegister, __size>::convert(lhs));
gen->write(Fits<VirtualRegister, __size>::convert(rhs));
return true;
}
return false;
}
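// emitImpl() aligns the stream for the wide forms, re-validates with
// checkImpl(), and only then writes: optional wide prefix, opcode, then the
// three operands, each converted to the chosen width. recordOpcode() lets the
// generator track what was last emitted (e.g. for fusing decisions).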
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**stricteq"[2 - __sizeShiftAmount]);
dumper->dumpOperand("dst", m_dst, true);
dumper->dumpOperand("lhs", m_lhs, false);
dumper->dumpOperand("rhs", m_rhs, false);
}
OpStricteq(const uint8_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_lhs(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[1]))
, m_rhs(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpStricteq(const uint16_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_lhs(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[1]))
, m_rhs(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpStricteq(const uint32_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_lhs(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[1]))
, m_rhs(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static OpStricteq decode(const uint8_t* stream)
{
// stream points at the first byte of the instruction (the wide prefix, if any); the constructor receives a pointer to the first operand.
if (*stream == op_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == op_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setDst<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setDst<OpcodeSize::Wide16>(value, func);
else
setDst<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setLhs(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setLhs<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setLhs<OpcodeSize::Wide16>(value, func);
else
setLhs<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setLhs(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setRhs(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setRhs<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setRhs<OpcodeSize::Wide16>(value, func);
else
setRhs<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setRhs(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 2 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
VirtualRegister m_dst;
VirtualRegister m_lhs;
VirtualRegister m_rhs;
};
struct OpNstricteq : public Instruction {
static constexpr OpcodeID opcodeID = op_nstricteq;
static constexpr size_t length = 4;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, dst, lhs, rhs);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, dst, lhs, rhs);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, dst, lhs, rhs))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, dst, lhs, rhs))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, dst, lhs, rhs);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& dst, VirtualRegister& lhs, VirtualRegister& rhs)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<OpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(dst)
&& Fits<VirtualRegister, __size>::check(lhs)
&& Fits<VirtualRegister, __size>::check(rhs)
&& (__size == OpcodeSize::Wide16 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, dst, lhs, rhs)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide32));
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(dst));
gen->write(Fits<VirtualRegister, __size>::convert(lhs));
gen->write(Fits<VirtualRegister, __size>::convert(rhs));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**nstricteq"[2 - __sizeShiftAmount]);
dumper->dumpOperand("dst", m_dst, true);
dumper->dumpOperand("lhs", m_lhs, false);
dumper->dumpOperand("rhs", m_rhs, false);
}
OpNstricteq(const uint8_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_lhs(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[1]))
, m_rhs(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpNstricteq(const uint16_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_lhs(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[1]))
, m_rhs(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpNstricteq(const uint32_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_lhs(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[1]))
, m_rhs(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static OpNstricteq decode(const uint8_t* stream)
{
// stream points at the first byte of the instruction (the wide prefix, if any); the constructor receives a pointer to the first operand.
if (*stream == op_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == op_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setDst<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setDst<OpcodeSize::Wide16>(value, func);
else
setDst<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setLhs(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setLhs<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setLhs<OpcodeSize::Wide16>(value, func);
else
setLhs<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setLhs(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setRhs(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setRhs<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setRhs<OpcodeSize::Wide16>(value, func);
else
setRhs<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setRhs(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 2 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
VirtualRegister m_dst;
VirtualRegister m_lhs;
VirtualRegister m_rhs;
};
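// Illustrative sketch (not part of the generated interface): typical use of
// one of these structs, assuming `bytes` points at the instruction's first
// byte and `generator` is a BytecodeGenerator:
//
//   OpNstricteq::emit(&generator, dst, lhs, rhs); // picks the narrowest width
//   ...
//   auto op = OpNstricteq::decode(bytes);         // width-aware decode
//   VirtualRegister lhs = op.m_lhs;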
struct OpLess : public Instruction {
static constexpr OpcodeID opcodeID = op_less;
static constexpr size_t length = 4;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, dst, lhs, rhs);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, dst, lhs, rhs);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, dst, lhs, rhs))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, dst, lhs, rhs))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, dst, lhs, rhs);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& dst, VirtualRegister& lhs, VirtualRegister& rhs)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<OpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(dst)
&& Fits<VirtualRegister, __size>::check(lhs)
&& Fits<VirtualRegister, __size>::check(rhs)
&& (__size == OpcodeSize::Wide16 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, dst, lhs, rhs)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide32));
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(dst));
gen->write(Fits<VirtualRegister, __size>::convert(lhs));
gen->write(Fits<VirtualRegister, __size>::convert(rhs));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**less"[2 - __sizeShiftAmount]);
dumper->dumpOperand("dst", m_dst, true);
dumper->dumpOperand("lhs", m_lhs, false);
dumper->dumpOperand("rhs", m_rhs, false);
}
OpLess(const uint8_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_lhs(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[1]))
, m_rhs(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpLess(const uint16_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_lhs(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[1]))
, m_rhs(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpLess(const uint32_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_lhs(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[1]))
, m_rhs(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static OpLess decode(const uint8_t* stream)
{
// stream points at the first byte of the instruction (the wide prefix, if any); the constructor receives a pointer to the first operand.
if (*stream == op_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == op_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setDst<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setDst<OpcodeSize::Wide16>(value, func);
else
setDst<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setLhs(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setLhs<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setLhs<OpcodeSize::Wide16>(value, func);
else
setLhs<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setLhs(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setRhs(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setRhs<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setRhs<OpcodeSize::Wide16>(value, func);
else
setRhs<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setRhs(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 2 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
VirtualRegister m_dst;
VirtualRegister m_lhs;
VirtualRegister m_rhs;
};
struct OpLesseq : public Instruction {
static constexpr OpcodeID opcodeID = op_lesseq;
static constexpr size_t length = 4;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, dst, lhs, rhs);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, dst, lhs, rhs);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, dst, lhs, rhs))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, dst, lhs, rhs))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, dst, lhs, rhs);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& dst, VirtualRegister& lhs, VirtualRegister& rhs)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<OpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(dst)
&& Fits<VirtualRegister, __size>::check(lhs)
&& Fits<VirtualRegister, __size>::check(rhs)
&& (__size == OpcodeSize::Wide16 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, dst, lhs, rhs)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide32));
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(dst));
gen->write(Fits<VirtualRegister, __size>::convert(lhs));
gen->write(Fits<VirtualRegister, __size>::convert(rhs));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**lesseq"[2 - __sizeShiftAmount]);
dumper->dumpOperand("dst", m_dst, true);
dumper->dumpOperand("lhs", m_lhs, false);
dumper->dumpOperand("rhs", m_rhs, false);
}
OpLesseq(const uint8_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_lhs(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[1]))
, m_rhs(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpLesseq(const uint16_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_lhs(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[1]))
, m_rhs(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpLesseq(const uint32_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_lhs(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[1]))
, m_rhs(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static OpLesseq decode(const uint8_t* stream)
{
// stream points at the first byte of the instruction (the wide prefix, if any); the constructor receives a pointer to the first operand.
if (*stream == op_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == op_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setDst<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setDst<OpcodeSize::Wide16>(value, func);
else
setDst<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setLhs(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setLhs<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setLhs<OpcodeSize::Wide16>(value, func);
else
setLhs<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setLhs(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setRhs(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setRhs<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setRhs<OpcodeSize::Wide16>(value, func);
else
setRhs<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setRhs(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 2 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
VirtualRegister m_dst;
VirtualRegister m_lhs;
VirtualRegister m_rhs;
};
struct OpGreater : public Instruction {
static constexpr OpcodeID opcodeID = op_greater;
static constexpr size_t length = 4;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, dst, lhs, rhs);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, dst, lhs, rhs);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, dst, lhs, rhs))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, dst, lhs, rhs))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, dst, lhs, rhs);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& dst, VirtualRegister& lhs, VirtualRegister& rhs)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<OpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(dst)
&& Fits<VirtualRegister, __size>::check(lhs)
&& Fits<VirtualRegister, __size>::check(rhs)
&& (__size == OpcodeSize::Wide16 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, dst, lhs, rhs)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide32));
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(dst));
gen->write(Fits<VirtualRegister, __size>::convert(lhs));
gen->write(Fits<VirtualRegister, __size>::convert(rhs));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**greater"[2 - __sizeShiftAmount]);
dumper->dumpOperand("dst", m_dst, true);
dumper->dumpOperand("lhs", m_lhs, false);
dumper->dumpOperand("rhs", m_rhs, false);
}
OpGreater(const uint8_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_lhs(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[1]))
, m_rhs(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpGreater(const uint16_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_lhs(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[1]))
, m_rhs(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpGreater(const uint32_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_lhs(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[1]))
, m_rhs(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static OpGreater decode(const uint8_t* stream)
{
// stream points at the first byte of the instruction (the wide prefix, if any); the constructor receives a pointer to the first operand.
if (*stream == op_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == op_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setDst<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setDst<OpcodeSize::Wide16>(value, func);
else
setDst<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setLhs(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setLhs<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setLhs<OpcodeSize::Wide16>(value, func);
else
setLhs<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setLhs(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setRhs(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setRhs<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setRhs<OpcodeSize::Wide16>(value, func);
else
setRhs<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setRhs(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 2 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
VirtualRegister m_dst;
VirtualRegister m_lhs;
VirtualRegister m_rhs;
};
struct OpGreatereq : public Instruction {
static constexpr OpcodeID opcodeID = op_greatereq;
static constexpr size_t length = 4;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, dst, lhs, rhs);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, dst, lhs, rhs);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, dst, lhs, rhs))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, dst, lhs, rhs))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, dst, lhs, rhs);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& dst, VirtualRegister& lhs, VirtualRegister& rhs)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<OpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(dst)
&& Fits<VirtualRegister, __size>::check(lhs)
&& Fits<VirtualRegister, __size>::check(rhs)
&& (__size == OpcodeSize::Wide16 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, dst, lhs, rhs)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide32));
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(dst));
gen->write(Fits<VirtualRegister, __size>::convert(lhs));
gen->write(Fits<VirtualRegister, __size>::convert(rhs));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**greatereq"[2 - __sizeShiftAmount]);
dumper->dumpOperand("dst", m_dst, true);
dumper->dumpOperand("lhs", m_lhs, false);
dumper->dumpOperand("rhs", m_rhs, false);
}
OpGreatereq(const uint8_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_lhs(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[1]))
, m_rhs(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpGreatereq(const uint16_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_lhs(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[1]))
, m_rhs(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpGreatereq(const uint32_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_lhs(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[1]))
, m_rhs(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static OpGreatereq decode(const uint8_t* stream)
{
// stream points at the first byte of the instruction (the wide prefix, if any); the constructor receives a pointer to the first operand.
if (*stream == op_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == op_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setDst<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setDst<OpcodeSize::Wide16>(value, func);
else
setDst<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setLhs(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setLhs<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setLhs<OpcodeSize::Wide16>(value, func);
else
setLhs<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setLhs(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setRhs(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setRhs<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setRhs<OpcodeSize::Wide16>(value, func);
else
setRhs<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setRhs(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 2 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
VirtualRegister m_dst;
VirtualRegister m_lhs;
VirtualRegister m_rhs;
};
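// OpBelow / OpBeloweq are structurally identical to OpLess / OpLesseq; the
// below/beloweq mnemonics follow the conventional naming for unsigned
// comparison, which is presumably the semantic distinction here.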
struct OpBelow : public Instruction {
static constexpr OpcodeID opcodeID = op_below;
static constexpr size_t length = 4;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, dst, lhs, rhs);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, dst, lhs, rhs);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, dst, lhs, rhs))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, dst, lhs, rhs))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, dst, lhs, rhs);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& dst, VirtualRegister& lhs, VirtualRegister& rhs)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<OpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(dst)
&& Fits<VirtualRegister, __size>::check(lhs)
&& Fits<VirtualRegister, __size>::check(rhs)
&& (__size == OpcodeSize::Wide16 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, dst, lhs, rhs)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide32));
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(dst));
gen->write(Fits<VirtualRegister, __size>::convert(lhs));
gen->write(Fits<VirtualRegister, __size>::convert(rhs));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**below"[2 - __sizeShiftAmount]);
dumper->dumpOperand("dst", m_dst, true);
dumper->dumpOperand("lhs", m_lhs, false);
dumper->dumpOperand("rhs", m_rhs, false);
}
OpBelow(const uint8_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_lhs(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[1]))
, m_rhs(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpBelow(const uint16_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_lhs(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[1]))
, m_rhs(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpBelow(const uint32_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_lhs(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[1]))
, m_rhs(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static OpBelow decode(const uint8_t* stream)
{
// stream points at the first byte of the instruction (the wide prefix, if any); the constructor receives a pointer to the first operand.
if (*stream == op_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == op_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setDst<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setDst<OpcodeSize::Wide16>(value, func);
else
setDst<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setLhs(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setLhs<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setLhs<OpcodeSize::Wide16>(value, func);
else
setLhs<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setLhs(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setRhs(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setRhs<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setRhs<OpcodeSize::Wide16>(value, func);
else
setRhs<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setRhs(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 2 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
VirtualRegister m_dst;
VirtualRegister m_lhs;
VirtualRegister m_rhs;
};
struct OpBeloweq : public Instruction {
static constexpr OpcodeID opcodeID = op_beloweq;
static constexpr size_t length = 4;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, dst, lhs, rhs);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, dst, lhs, rhs);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, dst, lhs, rhs))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, dst, lhs, rhs))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, dst, lhs, rhs);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& dst, VirtualRegister& lhs, VirtualRegister& rhs)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<OpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(dst)
&& Fits<VirtualRegister, __size>::check(lhs)
&& Fits<VirtualRegister, __size>::check(rhs)
&& (__size == OpcodeSize::Wide16 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, dst, lhs, rhs)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide32));
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(dst));
gen->write(Fits<VirtualRegister, __size>::convert(lhs));
gen->write(Fits<VirtualRegister, __size>::convert(rhs));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**beloweq"[2 - __sizeShiftAmount]);
dumper->dumpOperand("dst", m_dst, true);
dumper->dumpOperand("lhs", m_lhs, false);
dumper->dumpOperand("rhs", m_rhs, false);
}
OpBeloweq(const uint8_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_lhs(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[1]))
, m_rhs(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpBeloweq(const uint16_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_lhs(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[1]))
, m_rhs(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpBeloweq(const uint32_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_lhs(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[1]))
, m_rhs(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static OpBeloweq decode(const uint8_t* stream)
{
// stream points at the first byte of the instruction (the wide prefix, if any); the constructor receives a pointer to the first operand.
if (*stream == op_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == op_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setDst<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setDst<OpcodeSize::Wide16>(value, func);
else
setDst<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setLhs(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setLhs<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setLhs<OpcodeSize::Wide16>(value, func);
else
setLhs<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setLhs(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setRhs(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setRhs<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setRhs<OpcodeSize::Wide16>(value, func);
else
setRhs<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setRhs(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 2 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
VirtualRegister m_dst;
VirtualRegister m_lhs;
VirtualRegister m_rhs;
};
struct OpMod : public Instruction {
static constexpr OpcodeID opcodeID = op_mod;
static constexpr size_t length = 4;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, dst, lhs, rhs);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, dst, lhs, rhs);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, dst, lhs, rhs))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, dst, lhs, rhs))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, dst, lhs, rhs);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& dst, VirtualRegister& lhs, VirtualRegister& rhs)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<OpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(dst)
&& Fits<VirtualRegister, __size>::check(lhs)
&& Fits<VirtualRegister, __size>::check(rhs)
&& (__size == OpcodeSize::Wide16 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, dst, lhs, rhs)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide32));
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(dst));
gen->write(Fits<VirtualRegister, __size>::convert(lhs));
gen->write(Fits<VirtualRegister, __size>::convert(rhs));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**mod"[2 - __sizeShiftAmount]);
dumper->dumpOperand("dst", m_dst, true);
dumper->dumpOperand("lhs", m_lhs, false);
dumper->dumpOperand("rhs", m_rhs, false);
}
OpMod(const uint8_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_lhs(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[1]))
, m_rhs(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpMod(const uint16_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_lhs(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[1]))
, m_rhs(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpMod(const uint32_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_lhs(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[1]))
, m_rhs(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static OpMod decode(const uint8_t* stream)
{
        // Hand the constructor a pointer to the first operand; the opcode byte and any wide prefix are skipped here.
if (*stream == op_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == op_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
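    // The sized setters below locate operand N at
    //     this + N * size + PaddingBySize<size>::value + 1 /* opcode byte */,
    // where the PaddingBySize term covers the wide-prefix byte (op_wide16/op_wide32)
    // in the wide encodings and contributes nothing for the narrow encoding.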
template<typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setDst<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setDst<OpcodeSize::Wide16>(value, func);
else
setDst<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setLhs(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setLhs<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setLhs<OpcodeSize::Wide16>(value, func);
else
setLhs<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setLhs(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setRhs(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setRhs<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setRhs<OpcodeSize::Wide16>(value, func);
else
setRhs<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setRhs(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 2 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
VirtualRegister m_dst;
VirtualRegister m_lhs;
VirtualRegister m_rhs;
};
struct OpPow : public Instruction {
static constexpr OpcodeID opcodeID = op_pow;
static constexpr size_t length = 4;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, dst, lhs, rhs);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, dst, lhs, rhs);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, dst, lhs, rhs))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, dst, lhs, rhs))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, dst, lhs, rhs);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& dst, VirtualRegister& lhs, VirtualRegister& rhs)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<OpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(dst)
&& Fits<VirtualRegister, __size>::check(lhs)
&& Fits<VirtualRegister, __size>::check(rhs)
&& (__size == OpcodeSize::Wide16 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, dst, lhs, rhs)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide32));
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(dst));
gen->write(Fits<VirtualRegister, __size>::convert(lhs));
gen->write(Fits<VirtualRegister, __size>::convert(rhs));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**pow"[2 - __sizeShiftAmount]);
dumper->dumpOperand("dst", m_dst, true);
dumper->dumpOperand("lhs", m_lhs, false);
dumper->dumpOperand("rhs", m_rhs, false);
}
OpPow(const uint8_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_lhs(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[1]))
, m_rhs(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpPow(const uint16_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_lhs(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[1]))
, m_rhs(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpPow(const uint32_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_lhs(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[1]))
, m_rhs(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static OpPow decode(const uint8_t* stream)
{
        // Hand the constructor a pointer to the first operand; the opcode byte and any wide prefix are skipped here.
if (*stream == op_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == op_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setDst<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setDst<OpcodeSize::Wide16>(value, func);
else
setDst<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setLhs(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setLhs<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setLhs<OpcodeSize::Wide16>(value, func);
else
setLhs<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setLhs(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setRhs(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setRhs<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setRhs<OpcodeSize::Wide16>(value, func);
else
setRhs<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setRhs(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 2 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
VirtualRegister m_dst;
VirtualRegister m_lhs;
VirtualRegister m_rhs;
};
struct OpUrshift : public Instruction {
static constexpr OpcodeID opcodeID = op_urshift;
static constexpr size_t length = 4;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, dst, lhs, rhs);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, dst, lhs, rhs);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, dst, lhs, rhs))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, dst, lhs, rhs))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, dst, lhs, rhs);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& dst, VirtualRegister& lhs, VirtualRegister& rhs)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<OpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(dst)
&& Fits<VirtualRegister, __size>::check(lhs)
&& Fits<VirtualRegister, __size>::check(rhs)
&& (__size == OpcodeSize::Wide16 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, dst, lhs, rhs)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide32));
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(dst));
gen->write(Fits<VirtualRegister, __size>::convert(lhs));
gen->write(Fits<VirtualRegister, __size>::convert(rhs));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**urshift"[2 - __sizeShiftAmount]);
dumper->dumpOperand("dst", m_dst, true);
dumper->dumpOperand("lhs", m_lhs, false);
dumper->dumpOperand("rhs", m_rhs, false);
}
OpUrshift(const uint8_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_lhs(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[1]))
, m_rhs(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpUrshift(const uint16_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_lhs(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[1]))
, m_rhs(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpUrshift(const uint32_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_lhs(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[1]))
, m_rhs(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static OpUrshift decode(const uint8_t* stream)
{
        // Hand the constructor a pointer to the first operand; the opcode byte and any wide prefix are skipped here.
if (*stream == op_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == op_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setDst<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setDst<OpcodeSize::Wide16>(value, func);
else
setDst<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setLhs(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setLhs<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setLhs<OpcodeSize::Wide16>(value, func);
else
setLhs<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setLhs(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setRhs(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setRhs<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setRhs<OpcodeSize::Wide16>(value, func);
else
setRhs<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setRhs(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 2 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
VirtualRegister m_dst;
VirtualRegister m_lhs;
VirtualRegister m_rhs;
};
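// From here on the same narrow/wide16/wide32 emission and decoding machinery repeats
// for unary ops: one source operand instead of two, so the instruction length drops
// from 4 to 3 (opcode + dst + operand).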
struct OpEqNull : public Instruction {
static constexpr OpcodeID opcodeID = op_eq_null;
static constexpr size_t length = 3;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister operand)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, dst, operand);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister operand)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, dst, operand);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister operand)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, dst, operand))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, dst, operand))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, dst, operand);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& dst, VirtualRegister& operand)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<OpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(dst)
&& Fits<VirtualRegister, __size>::check(operand)
&& (__size == OpcodeSize::Wide16 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister operand)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, dst, operand)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide32));
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(dst));
gen->write(Fits<VirtualRegister, __size>::convert(operand));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**eq_null"[2 - __sizeShiftAmount]);
dumper->dumpOperand("dst", m_dst, true);
dumper->dumpOperand("operand", m_operand, false);
}
OpEqNull(const uint8_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_operand(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[1]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpEqNull(const uint16_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_operand(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[1]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpEqNull(const uint32_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_operand(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[1]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static OpEqNull decode(const uint8_t* stream)
{
        // Hand the constructor a pointer to the first operand; the opcode byte and any wide prefix are skipped here.
if (*stream == op_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == op_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setDst<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setDst<OpcodeSize::Wide16>(value, func);
else
setDst<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setOperand(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setOperand<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setOperand<OpcodeSize::Wide16>(value, func);
else
setOperand<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setOperand(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
VirtualRegister m_dst;
VirtualRegister m_operand;
};
struct OpNeqNull : public Instruction {
static constexpr OpcodeID opcodeID = op_neq_null;
static constexpr size_t length = 3;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister operand)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, dst, operand);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister operand)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, dst, operand);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister operand)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, dst, operand))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, dst, operand))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, dst, operand);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& dst, VirtualRegister& operand)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<OpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(dst)
&& Fits<VirtualRegister, __size>::check(operand)
&& (__size == OpcodeSize::Wide16 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister operand)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, dst, operand)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide32));
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(dst));
gen->write(Fits<VirtualRegister, __size>::convert(operand));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**neq_null"[2 - __sizeShiftAmount]);
dumper->dumpOperand("dst", m_dst, true);
dumper->dumpOperand("operand", m_operand, false);
}
OpNeqNull(const uint8_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_operand(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[1]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpNeqNull(const uint16_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_operand(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[1]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpNeqNull(const uint32_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_operand(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[1]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static OpNeqNull decode(const uint8_t* stream)
{
        // Hand the constructor a pointer to the first operand; the opcode byte and any wide prefix are skipped here.
if (*stream == op_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == op_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setDst<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setDst<OpcodeSize::Wide16>(value, func);
else
setDst<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setOperand(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setOperand<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setOperand<OpcodeSize::Wide16>(value, func);
else
setOperand<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setOperand(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
VirtualRegister m_dst;
VirtualRegister m_operand;
};
struct OpToString : public Instruction {
static constexpr OpcodeID opcodeID = op_to_string;
static constexpr size_t length = 3;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister operand)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, dst, operand);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister operand)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, dst, operand);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister operand)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, dst, operand))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, dst, operand))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, dst, operand);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& dst, VirtualRegister& operand)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<OpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(dst)
&& Fits<VirtualRegister, __size>::check(operand)
&& (__size == OpcodeSize::Wide16 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister operand)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, dst, operand)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide32));
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(dst));
gen->write(Fits<VirtualRegister, __size>::convert(operand));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**to_string"[2 - __sizeShiftAmount]);
dumper->dumpOperand("dst", m_dst, true);
dumper->dumpOperand("operand", m_operand, false);
}
OpToString(const uint8_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_operand(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[1]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpToString(const uint16_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_operand(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[1]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpToString(const uint32_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_operand(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[1]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static OpToString decode(const uint8_t* stream)
{
        // Hand the constructor a pointer to the first operand; the opcode byte and any wide prefix are skipped here.
if (*stream == op_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == op_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setDst<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setDst<OpcodeSize::Wide16>(value, func);
else
setDst<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setOperand(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setOperand<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setOperand<OpcodeSize::Wide16>(value, func);
else
setOperand<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setOperand(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
VirtualRegister m_dst;
VirtualRegister m_operand;
};
struct OpUnsigned : public Instruction {
static constexpr OpcodeID opcodeID = op_unsigned;
static constexpr size_t length = 3;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister operand)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, dst, operand);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister operand)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, dst, operand);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister operand)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, dst, operand))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, dst, operand))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, dst, operand);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& dst, VirtualRegister& operand)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<OpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(dst)
&& Fits<VirtualRegister, __size>::check(operand)
&& (__size == OpcodeSize::Wide16 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister operand)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, dst, operand)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide32));
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(dst));
gen->write(Fits<VirtualRegister, __size>::convert(operand));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**unsigned"[2 - __sizeShiftAmount]);
dumper->dumpOperand("dst", m_dst, true);
dumper->dumpOperand("operand", m_operand, false);
}
OpUnsigned(const uint8_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_operand(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[1]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpUnsigned(const uint16_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_operand(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[1]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpUnsigned(const uint32_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_operand(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[1]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static OpUnsigned decode(const uint8_t* stream)
{
        // Hand the constructor a pointer to the first operand; the opcode byte and any wide prefix are skipped here.
if (*stream == op_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == op_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setDst<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setDst<OpcodeSize::Wide16>(value, func);
else
setDst<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setOperand(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setOperand<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setOperand<OpcodeSize::Wide16>(value, func);
else
setOperand<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setOperand(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
VirtualRegister m_dst;
VirtualRegister m_operand;
};
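// A minimal decoding sketch (assuming `pc` points at the wide prefix when one is
// present, otherwise at the opcode byte; `pc` itself is illustrative, not an API):
//
//     OpIsEmpty op = OpIsEmpty::decode(pc);
//     VirtualRegister dst = op.m_dst; // operands are rehydrated as VirtualRegisters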
struct OpIsEmpty : public Instruction {
static constexpr OpcodeID opcodeID = op_is_empty;
static constexpr size_t length = 3;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister operand)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, dst, operand);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister operand)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, dst, operand);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister operand)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, dst, operand))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, dst, operand))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, dst, operand);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& dst, VirtualRegister& operand)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<OpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(dst)
&& Fits<VirtualRegister, __size>::check(operand)
&& (__size == OpcodeSize::Wide16 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister operand)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, dst, operand)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide32));
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(dst));
gen->write(Fits<VirtualRegister, __size>::convert(operand));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**is_empty"[2 - __sizeShiftAmount]);
dumper->dumpOperand("dst", m_dst, true);
dumper->dumpOperand("operand", m_operand, false);
}
OpIsEmpty(const uint8_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_operand(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[1]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpIsEmpty(const uint16_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_operand(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[1]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpIsEmpty(const uint32_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_operand(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[1]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static OpIsEmpty decode(const uint8_t* stream)
{
        // Hand the constructor a pointer to the first operand; the opcode byte and any wide prefix are skipped here.
if (*stream == op_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == op_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setDst<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setDst<OpcodeSize::Wide16>(value, func);
else
setDst<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setOperand(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setOperand<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setOperand<OpcodeSize::Wide16>(value, func);
else
setOperand<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setOperand(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
VirtualRegister m_dst;
VirtualRegister m_operand;
};
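// The typeof_is_* ops below let the bytecode generator fuse comparisons such as
// `typeof x === "undefined"` (and the object/function variants) into a single
// boolean-producing instruction instead of materializing the typeof string first.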
struct OpTypeofIsUndefined : public Instruction {
static constexpr OpcodeID opcodeID = op_typeof_is_undefined;
static constexpr size_t length = 3;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister operand)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, dst, operand);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister operand)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, dst, operand);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister operand)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, dst, operand))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, dst, operand))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, dst, operand);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& dst, VirtualRegister& operand)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<OpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(dst)
&& Fits<VirtualRegister, __size>::check(operand)
&& (__size == OpcodeSize::Wide16 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister operand)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, dst, operand)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide32));
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(dst));
gen->write(Fits<VirtualRegister, __size>::convert(operand));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**typeof_is_undefined"[2 - __sizeShiftAmount]);
dumper->dumpOperand("dst", m_dst, true);
dumper->dumpOperand("operand", m_operand, false);
}
OpTypeofIsUndefined(const uint8_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_operand(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[1]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpTypeofIsUndefined(const uint16_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_operand(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[1]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpTypeofIsUndefined(const uint32_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_operand(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[1]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static OpTypeofIsUndefined decode(const uint8_t* stream)
{
        // Hand the constructor a pointer to the first operand; the opcode byte and any wide prefix are skipped here.
if (*stream == op_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == op_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setDst<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setDst<OpcodeSize::Wide16>(value, func);
else
setDst<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setOperand(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setOperand<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setOperand<OpcodeSize::Wide16>(value, func);
else
setOperand<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setOperand(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
VirtualRegister m_dst;
VirtualRegister m_operand;
};
struct OpTypeofIsObject : public Instruction {
static constexpr OpcodeID opcodeID = op_typeof_is_object;
static constexpr size_t length = 3;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister operand)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, dst, operand);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister operand)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, dst, operand);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister operand)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, dst, operand))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, dst, operand))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, dst, operand);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& dst, VirtualRegister& operand)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<OpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(dst)
&& Fits<VirtualRegister, __size>::check(operand)
&& (__size == OpcodeSize::Wide16 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister operand)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, dst, operand)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide32));
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(dst));
gen->write(Fits<VirtualRegister, __size>::convert(operand));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**typeof_is_object"[2 - __sizeShiftAmount]);
dumper->dumpOperand("dst", m_dst, true);
dumper->dumpOperand("operand", m_operand, false);
}
OpTypeofIsObject(const uint8_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_operand(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[1]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpTypeofIsObject(const uint16_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_operand(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[1]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpTypeofIsObject(const uint32_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_operand(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[1]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static OpTypeofIsObject decode(const uint8_t* stream)
{
        // Hand the constructor a pointer to the first operand; the opcode byte and any wide prefix are skipped here.
if (*stream == op_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == op_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setDst<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setDst<OpcodeSize::Wide16>(value, func);
else
setDst<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setOperand(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setOperand<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setOperand<OpcodeSize::Wide16>(value, func);
else
setOperand<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setOperand(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
VirtualRegister m_dst;
VirtualRegister m_operand;
};
struct OpTypeofIsFunction : public Instruction {
static constexpr OpcodeID opcodeID = op_typeof_is_function;
static constexpr size_t length = 3;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister operand)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, dst, operand);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister operand)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, dst, operand);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister operand)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, dst, operand))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, dst, operand))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, dst, operand);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& dst, VirtualRegister& operand)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<OpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(dst)
&& Fits<VirtualRegister, __size>::check(operand)
&& (__size == OpcodeSize::Wide16 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister operand)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, dst, operand)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide32));
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(dst));
gen->write(Fits<VirtualRegister, __size>::convert(operand));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
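        // __sizeShiftAmount is 0, 1, or 2 for narrow, wide16, and wide32;
        // indexing the "**"-prefixed literal at 2 - __sizeShiftAmount keeps one
        // leading '*' per wide level in the dumped opcode name.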
dumper->printLocationAndOp(__location, &"**typeof_is_function"[2 - __sizeShiftAmount]);
dumper->dumpOperand("dst", m_dst, true);
dumper->dumpOperand("operand", m_operand, false);
}
OpTypeofIsFunction(const uint8_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_operand(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[1]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpTypeofIsFunction(const uint16_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_operand(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[1]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpTypeofIsFunction(const uint32_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_operand(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[1]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static OpTypeofIsFunction decode(const uint8_t* stream)
{
        // stream points at the wide prefix (if any) or the opcode byte; each constructor is handed a pointer to the first operand, with the opcode immediately before it (hence the stream[-1] assertion).
if (*stream == op_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == op_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setDst<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setDst<OpcodeSize::Wide16>(value, func);
else
setDst<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setOperand(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setOperand<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setOperand<OpcodeSize::Wide16>(value, func);
else
setOperand<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setOperand(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
VirtualRegister m_dst;
VirtualRegister m_operand;
};
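// emitWithSmallestSizeRequirement (one copy per op, as above) treats its
// __size argument as a floor, not an exact width: it attempts each encoding
// from narrowest to widest with NoAssert, and only the final Wide32 attempt
// asserts, since by then every operand must fit. Roughly (illustrative):
//
//   if (emit<OpcodeSize::Narrow, ..., NoAssert>(gen, dst, operand)) return;
//   if (emit<OpcodeSize::Wide16, ..., NoAssert>(gen, dst, operand)) return;
//   emit<OpcodeSize::Wide32, ..., Assert>(gen, dst, operand); // must succeed
//
// Callers that already know an operand needs 16 bits can pass Wide16 as the
// floor to skip the doomed narrow attempt.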
struct OpIsUndefinedOrNull : public Instruction {
static constexpr OpcodeID opcodeID = op_is_undefined_or_null;
static constexpr size_t length = 3;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister operand)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, dst, operand);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister operand)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, dst, operand);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister operand)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, dst, operand))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, dst, operand))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, dst, operand);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& dst, VirtualRegister& operand)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<OpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(dst)
&& Fits<VirtualRegister, __size>::check(operand)
&& (__size == OpcodeSize::Wide16 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister operand)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, dst, operand)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide32));
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(dst));
gen->write(Fits<VirtualRegister, __size>::convert(operand));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**is_undefined_or_null"[2 - __sizeShiftAmount]);
dumper->dumpOperand("dst", m_dst, true);
dumper->dumpOperand("operand", m_operand, false);
}
OpIsUndefinedOrNull(const uint8_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_operand(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[1]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpIsUndefinedOrNull(const uint16_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_operand(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[1]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpIsUndefinedOrNull(const uint32_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_operand(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[1]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static OpIsUndefinedOrNull decode(const uint8_t* stream)
{
        // stream points at the wide prefix (if any) or the opcode byte; each constructor is handed a pointer to the first operand, with the opcode immediately before it (hence the stream[-1] assertion).
if (*stream == op_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == op_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setDst<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setDst<OpcodeSize::Wide16>(value, func);
else
setDst<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setOperand(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setOperand<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setOperand<OpcodeSize::Wide16>(value, func);
else
setOperand<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setOperand(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
VirtualRegister m_dst;
VirtualRegister m_operand;
};
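// checkImpl is the single gatekeeper for encoding width: a size is usable only
// if the opcode id and every operand pass Fits<T, __size>::check, and, for the
// wide forms, the op_wide16/op_wide32 prefix itself fits in a narrow slot.
// Hypothetical example of the selection this drives:
//
//   VirtualRegister dst { 300 }; // offset too large for a one-byte operand
//   OpIsUndefinedOrNull::emit(gen, dst, operand);
//   // Narrow fails checkImpl, so the Wide16 (or Wide32) form is emitted.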
struct OpIsBoolean : public Instruction {
static constexpr OpcodeID opcodeID = op_is_boolean;
static constexpr size_t length = 3;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister operand)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, dst, operand);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister operand)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, dst, operand);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister operand)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, dst, operand))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, dst, operand))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, dst, operand);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& dst, VirtualRegister& operand)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<OpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(dst)
&& Fits<VirtualRegister, __size>::check(operand)
&& (__size == OpcodeSize::Wide16 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister operand)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, dst, operand)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide32));
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(dst));
gen->write(Fits<VirtualRegister, __size>::convert(operand));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**is_boolean"[2 - __sizeShiftAmount]);
dumper->dumpOperand("dst", m_dst, true);
dumper->dumpOperand("operand", m_operand, false);
}
OpIsBoolean(const uint8_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_operand(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[1]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpIsBoolean(const uint16_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_operand(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[1]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpIsBoolean(const uint32_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_operand(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[1]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static OpIsBoolean decode(const uint8_t* stream)
{
        // stream points at the wide prefix (if any) or the opcode byte; each constructor is handed a pointer to the first operand, with the opcode immediately before it (hence the stream[-1] assertion).
if (*stream == op_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == op_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setDst<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setDst<OpcodeSize::Wide16>(value, func);
else
setDst<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setOperand(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setOperand<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setOperand<OpcodeSize::Wide16>(value, func);
else
setOperand<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setOperand(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
VirtualRegister m_dst;
VirtualRegister m_operand;
};
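// The setDst/setOperand templates above patch an operand in place using direct
// pointer arithmetic; for operand index k the slot address is
//
//   (uint8_t*)this                  // start of the instruction
//   + k * size                      // fixed-width operand slots
//   + PaddingBySize<size>::value    // the one-byte wide prefix, when present
//   + 1                             // the opcode byte itself
//
// and the Functor is a fallback invoked when the new value does not fit the
// instruction's existing width (the instruction is never re-encoded wider).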
struct OpIsNumber : public Instruction {
static constexpr OpcodeID opcodeID = op_is_number;
static constexpr size_t length = 3;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister operand)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, dst, operand);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister operand)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, dst, operand);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister operand)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, dst, operand))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, dst, operand))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, dst, operand);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& dst, VirtualRegister& operand)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<OpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(dst)
&& Fits<VirtualRegister, __size>::check(operand)
&& (__size == OpcodeSize::Wide16 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister operand)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, dst, operand)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide32));
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(dst));
gen->write(Fits<VirtualRegister, __size>::convert(operand));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**is_number"[2 - __sizeShiftAmount]);
dumper->dumpOperand("dst", m_dst, true);
dumper->dumpOperand("operand", m_operand, false);
}
OpIsNumber(const uint8_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_operand(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[1]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpIsNumber(const uint16_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_operand(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[1]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpIsNumber(const uint32_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_operand(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[1]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static OpIsNumber decode(const uint8_t* stream)
{
        // stream points at the wide prefix (if any) or the opcode byte; each constructor is handed a pointer to the first operand, with the opcode immediately before it (hence the stream[-1] assertion).
if (*stream == op_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == op_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setDst<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setDst<OpcodeSize::Wide16>(value, func);
else
setDst<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setOperand(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setOperand<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setOperand<OpcodeSize::Wide16>(value, func);
else
setOperand<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setOperand(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
VirtualRegister m_dst;
VirtualRegister m_operand;
};
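// emitImpl writes the wire format in order: optional nop padding (emitted by
// alignWideOpcode16/32 only on targets that require aligned operand loads),
// optional wide prefix, opcode byte, then one fixed-width slot per operand.
// A sketch of the Wide16 path for OpIsNumber (conceptual; Fits<...>::convert
// is what narrows each value to the slot width):
//
//   gen->alignWideOpcode16();      // pad so the u16 operand slots are aligned
//   gen->write(op_wide16);         // 1-byte prefix
//   gen->write(opcodeID);          // 1-byte opcode
//   gen->write(uint16_t(dst));     // operands, one u16 each
//   gen->write(uint16_t(operand));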
struct OpIsBigInt : public Instruction {
static constexpr OpcodeID opcodeID = op_is_big_int;
static constexpr size_t length = 3;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister operand)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, dst, operand);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister operand)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, dst, operand);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister operand)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, dst, operand))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, dst, operand))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, dst, operand);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& dst, VirtualRegister& operand)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<OpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(dst)
&& Fits<VirtualRegister, __size>::check(operand)
&& (__size == OpcodeSize::Wide16 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister operand)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, dst, operand)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide32));
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(dst));
gen->write(Fits<VirtualRegister, __size>::convert(operand));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**is_big_int"[2 - __sizeShiftAmount]);
dumper->dumpOperand("dst", m_dst, true);
dumper->dumpOperand("operand", m_operand, false);
}
OpIsBigInt(const uint8_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_operand(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[1]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpIsBigInt(const uint16_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_operand(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[1]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpIsBigInt(const uint32_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_operand(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[1]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static OpIsBigInt decode(const uint8_t* stream)
{
        // stream points at the wide prefix (if any) or the opcode byte; each constructor is handed a pointer to the first operand, with the opcode immediately before it (hence the stream[-1] assertion).
if (*stream == op_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == op_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setDst<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setDst<OpcodeSize::Wide16>(value, func);
else
setDst<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setOperand(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setOperand<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setOperand<OpcodeSize::Wide16>(value, func);
else
setOperand<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setOperand(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
VirtualRegister m_dst;
VirtualRegister m_operand;
};
struct OpIsObject : public Instruction {
static constexpr OpcodeID opcodeID = op_is_object;
static constexpr size_t length = 3;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister operand)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, dst, operand);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister operand)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, dst, operand);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister operand)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, dst, operand))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, dst, operand))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, dst, operand);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& dst, VirtualRegister& operand)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<OpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(dst)
&& Fits<VirtualRegister, __size>::check(operand)
&& (__size == OpcodeSize::Wide16 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister operand)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, dst, operand)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide32));
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(dst));
gen->write(Fits<VirtualRegister, __size>::convert(operand));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**is_object"[2 - __sizeShiftAmount]);
dumper->dumpOperand("dst", m_dst, true);
dumper->dumpOperand("operand", m_operand, false);
}
OpIsObject(const uint8_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_operand(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[1]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpIsObject(const uint16_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_operand(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[1]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpIsObject(const uint32_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_operand(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[1]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static OpIsObject decode(const uint8_t* stream)
{
        // stream points at the wide prefix (if any) or the opcode byte; each constructor is handed a pointer to the first operand, with the opcode immediately before it (hence the stream[-1] assertion).
if (*stream == op_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == op_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setDst<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setDst<OpcodeSize::Wide16>(value, func);
else
setDst<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setOperand(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setOperand<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setOperand<OpcodeSize::Wide16>(value, func);
else
setOperand<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setOperand(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
VirtualRegister m_dst;
VirtualRegister m_operand;
};
struct OpIsCallable : public Instruction {
static constexpr OpcodeID opcodeID = op_is_callable;
static constexpr size_t length = 3;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister operand)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, dst, operand);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister operand)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, dst, operand);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister operand)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, dst, operand))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, dst, operand))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, dst, operand);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& dst, VirtualRegister& operand)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<OpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(dst)
&& Fits<VirtualRegister, __size>::check(operand)
&& (__size == OpcodeSize::Wide16 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister operand)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, dst, operand)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide32));
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(dst));
gen->write(Fits<VirtualRegister, __size>::convert(operand));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**is_callable"[2 - __sizeShiftAmount]);
dumper->dumpOperand("dst", m_dst, true);
dumper->dumpOperand("operand", m_operand, false);
}
OpIsCallable(const uint8_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_operand(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[1]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpIsCallable(const uint16_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_operand(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[1]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpIsCallable(const uint32_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_operand(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[1]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static OpIsCallable decode(const uint8_t* stream)
{
        // stream points at the wide prefix (if any) or the opcode byte; each constructor is handed a pointer to the first operand, with the opcode immediately before it (hence the stream[-1] assertion).
if (*stream == op_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == op_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setDst<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setDst<OpcodeSize::Wide16>(value, func);
else
setDst<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setOperand(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setOperand<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setOperand<OpcodeSize::Wide16>(value, func);
else
setOperand<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setOperand(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
VirtualRegister m_dst;
VirtualRegister m_operand;
};
struct OpIsConstructor : public Instruction {
static constexpr OpcodeID opcodeID = op_is_constructor;
static constexpr size_t length = 3;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister operand)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, dst, operand);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister operand)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, dst, operand);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister operand)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, dst, operand))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, dst, operand))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, dst, operand);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& dst, VirtualRegister& operand)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<OpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(dst)
&& Fits<VirtualRegister, __size>::check(operand)
&& (__size == OpcodeSize::Wide16 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister operand)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, dst, operand)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide32));
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(dst));
gen->write(Fits<VirtualRegister, __size>::convert(operand));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**is_constructor"[2 - __sizeShiftAmount]);
dumper->dumpOperand("dst", m_dst, true);
dumper->dumpOperand("operand", m_operand, false);
}
OpIsConstructor(const uint8_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_operand(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[1]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpIsConstructor(const uint16_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_operand(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[1]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpIsConstructor(const uint32_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_operand(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[1]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static OpIsConstructor decode(const uint8_t* stream)
{
        // stream points at the wide prefix (if any) or the opcode byte; each constructor is handed a pointer to the first operand, with the opcode immediately before it (hence the stream[-1] assertion).
if (*stream == op_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == op_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setDst<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setDst<OpcodeSize::Wide16>(value, func);
else
setDst<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setOperand(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setOperand<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setOperand<OpcodeSize::Wide16>(value, func);
else
setOperand<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setOperand(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
VirtualRegister m_dst;
VirtualRegister m_operand;
};
struct OpNot : public Instruction {
static constexpr OpcodeID opcodeID = op_not;
static constexpr size_t length = 3;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister operand)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, dst, operand);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister operand)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, dst, operand);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister operand)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, dst, operand))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, dst, operand))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, dst, operand);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& dst, VirtualRegister& operand)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<OpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(dst)
&& Fits<VirtualRegister, __size>::check(operand)
&& (__size == OpcodeSize::Wide16 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister operand)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, dst, operand)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide32));
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(dst));
gen->write(Fits<VirtualRegister, __size>::convert(operand));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**not"[2 - __sizeShiftAmount]);
dumper->dumpOperand("dst", m_dst, true);
dumper->dumpOperand("operand", m_operand, false);
}
OpNot(const uint8_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_operand(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[1]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpNot(const uint16_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_operand(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[1]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpNot(const uint32_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_operand(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[1]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static OpNot decode(const uint8_t* stream)
{
        // stream points at the wide prefix (if any) or the opcode byte; each constructor is handed a pointer to the first operand, with the opcode immediately before it (hence the stream[-1] assertion).
if (*stream == op_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == op_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setDst<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setDst<OpcodeSize::Wide16>(value, func);
else
setDst<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setOperand(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setOperand<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setOperand<OpcodeSize::Wide16>(value, func);
else
setOperand<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setOperand(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
VirtualRegister m_dst;
VirtualRegister m_operand;
};
struct OpIdentityWithProfile : public Instruction {
static constexpr OpcodeID opcodeID = op_identity_with_profile;
static constexpr size_t length = 4;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister srcDst, unsigned topProfile, unsigned bottomProfile)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, srcDst, topProfile, bottomProfile);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister srcDst, unsigned topProfile, unsigned bottomProfile)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, srcDst, topProfile, bottomProfile);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister srcDst, unsigned topProfile, unsigned bottomProfile)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, srcDst, topProfile, bottomProfile))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, srcDst, topProfile, bottomProfile))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, srcDst, topProfile, bottomProfile);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& srcDst, unsigned& topProfile, unsigned& bottomProfile)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<OpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(srcDst)
&& Fits<unsigned, __size>::check(topProfile)
&& Fits<unsigned, __size>::check(bottomProfile)
&& (__size == OpcodeSize::Wide16 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister srcDst, unsigned topProfile, unsigned bottomProfile)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, srcDst, topProfile, bottomProfile)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide32));
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(srcDst));
gen->write(Fits<unsigned, __size>::convert(topProfile));
gen->write(Fits<unsigned, __size>::convert(bottomProfile));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**identity_with_profile"[2 - __sizeShiftAmount]);
dumper->dumpOperand("srcDst", m_srcDst, true);
dumper->dumpOperand("topProfile", m_topProfile, false);
dumper->dumpOperand("bottomProfile", m_bottomProfile, false);
}
OpIdentityWithProfile(const uint8_t* stream)
: m_srcDst(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_topProfile(Fits<unsigned, OpcodeSize::Narrow>::convert(stream[1]))
, m_bottomProfile(Fits<unsigned, OpcodeSize::Narrow>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpIdentityWithProfile(const uint16_t* stream)
: m_srcDst(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_topProfile(Fits<unsigned, OpcodeSize::Wide16>::convert(stream[1]))
, m_bottomProfile(Fits<unsigned, OpcodeSize::Wide16>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpIdentityWithProfile(const uint32_t* stream)
: m_srcDst(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_topProfile(Fits<unsigned, OpcodeSize::Wide32>::convert(stream[1]))
, m_bottomProfile(Fits<unsigned, OpcodeSize::Wide32>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static OpIdentityWithProfile decode(const uint8_t* stream)
{
        // stream points at the wide prefix (if any) or the opcode byte; each constructor is handed a pointer to the first operand, with the opcode immediately before it (hence the stream[-1] assertion).
if (*stream == op_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == op_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setSrcDst(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setSrcDst<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setSrcDst<OpcodeSize::Wide16>(value, func);
else
setSrcDst<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setSrcDst(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setTopProfile(unsigned value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setTopProfile<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setTopProfile<OpcodeSize::Wide16>(value, func);
else
setTopProfile<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setTopProfile(unsigned value, Functor func)
{
if (!Fits<unsigned, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<unsigned, size>::convert(value);
}
template<typename Functor>
void setBottomProfile(unsigned value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setBottomProfile<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setBottomProfile<OpcodeSize::Wide16>(value, func);
else
setBottomProfile<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setBottomProfile(unsigned value, Functor func)
{
if (!Fits<unsigned, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 2 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<unsigned, size>::convert(value);
}
VirtualRegister m_srcDst;
unsigned m_topProfile;
unsigned m_bottomProfile;
};
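// OpIdentityWithProfile mixes operand types (one VirtualRegister, two unsigned
// profile indices), but every operand still occupies one fixed-width slot of
// the chosen encoding, so slot k lives at k * size regardless of its C++ type;
// only the Fits<T, size> specialization used to check and convert it differs.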
struct OpOverridesHasInstance : public Instruction {
static constexpr OpcodeID opcodeID = op_overrides_has_instance;
static constexpr size_t length = 4;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister constructor, VirtualRegister hasInstanceValue)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, dst, constructor, hasInstanceValue);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister constructor, VirtualRegister hasInstanceValue)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, dst, constructor, hasInstanceValue);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister constructor, VirtualRegister hasInstanceValue)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, dst, constructor, hasInstanceValue))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, dst, constructor, hasInstanceValue))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, dst, constructor, hasInstanceValue);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& dst, VirtualRegister& constructor, VirtualRegister& hasInstanceValue)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<OpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(dst)
&& Fits<VirtualRegister, __size>::check(constructor)
&& Fits<VirtualRegister, __size>::check(hasInstanceValue)
&& (__size == OpcodeSize::Wide16 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister constructor, VirtualRegister hasInstanceValue)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, dst, constructor, hasInstanceValue)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide32));
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(dst));
gen->write(Fits<VirtualRegister, __size>::convert(constructor));
gen->write(Fits<VirtualRegister, __size>::convert(hasInstanceValue));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**overrides_has_instance"[2 - __sizeShiftAmount]);
dumper->dumpOperand("dst", m_dst, true);
dumper->dumpOperand("constructor", m_constructor, false);
dumper->dumpOperand("hasInstanceValue", m_hasInstanceValue, false);
}
OpOverridesHasInstance(const uint8_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_constructor(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[1]))
, m_hasInstanceValue(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpOverridesHasInstance(const uint16_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_constructor(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[1]))
, m_hasInstanceValue(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpOverridesHasInstance(const uint32_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_constructor(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[1]))
, m_hasInstanceValue(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static OpOverridesHasInstance decode(const uint8_t* stream)
{
// `stream` points at the first byte of the instruction (the opcode, or an op_wide16/op_wide32 prefix); it is advanced past the opcode and prefix so the constructor receives a pointer to the first operand.
if (*stream == op_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == op_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setDst<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setDst<OpcodeSize::Wide16>(value, func);
else
setDst<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setConstructor(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setConstructor<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setConstructor<OpcodeSize::Wide16>(value, func);
else
setConstructor<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setConstructor(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setHasInstanceValue(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setHasInstanceValue<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setHasInstanceValue<OpcodeSize::Wide16>(value, func);
else
setHasInstanceValue<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setHasInstanceValue(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 2 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
VirtualRegister m_dst;
VirtualRegister m_constructor;
VirtualRegister m_hasInstanceValue;
};
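// Illustrative sketch, not generated: callers can request a minimum encoding
// width instead of letting emit() start at Narrow. Passing OpcodeSize::Wide32
// below skips the Narrow and Wide16 attempts inside
// emitWithSmallestSizeRequirement() and emits the prefixed 32-bit form
// directly. `gen` and the registers are assumed caller state.
template<typename BytecodeGenerator>
inline void exampleEmitWideOverridesHasInstance(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister constructor, VirtualRegister hasInstanceValue)
{
    OpOverridesHasInstance::emitWithSmallestSizeRequirement<OpcodeSize::Wide32>(gen, dst, constructor, hasInstanceValue);
}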
struct OpInstanceof : public Instruction {
static constexpr OpcodeID opcodeID = op_instanceof;
static constexpr size_t length = 4;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister value, VirtualRegister prototype)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, dst, value, prototype);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister value, VirtualRegister prototype)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, dst, value, prototype);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister value, VirtualRegister prototype)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, dst, value, prototype))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, dst, value, prototype))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, dst, value, prototype);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& dst, VirtualRegister& value, VirtualRegister& prototype)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<OpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(dst)
&& Fits<VirtualRegister, __size>::check(value)
&& Fits<VirtualRegister, __size>::check(prototype)
&& (__size == OpcodeSize::Wide16 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister value, VirtualRegister prototype)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, dst, value, prototype)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide32));
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(dst));
gen->write(Fits<VirtualRegister, __size>::convert(value));
gen->write(Fits<VirtualRegister, __size>::convert(prototype));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**instanceof"[2 - __sizeShiftAmount]);
dumper->dumpOperand("dst", m_dst, true);
dumper->dumpOperand("value", m_value, false);
dumper->dumpOperand("prototype", m_prototype, false);
}
OpInstanceof(const uint8_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_value(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[1]))
, m_prototype(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpInstanceof(const uint16_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_value(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[1]))
, m_prototype(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpInstanceof(const uint32_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_value(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[1]))
, m_prototype(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static OpInstanceof decode(const uint8_t* stream)
{
// `stream` points at the first byte of the instruction (the opcode, or an op_wide16/op_wide32 prefix); it is advanced past the opcode and prefix so the constructor receives a pointer to the first operand.
if (*stream == op_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == op_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setDst<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setDst<OpcodeSize::Wide16>(value, func);
else
setDst<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setValue(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setValue<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setValue<OpcodeSize::Wide16>(value, func);
else
setValue<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setValue(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setPrototype(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setPrototype<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setPrototype<OpcodeSize::Wide16>(value, func);
else
setPrototype<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setPrototype(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 2 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
VirtualRegister m_dst;
VirtualRegister m_value;
VirtualRegister m_prototype;
};
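// Illustrative sketch, not generated: the common emit path. The unsized
// overload starts at the Narrow encoding and widens to Wide16/Wide32 only if
// an operand fails its Fits<> check. All names here are assumed caller state.
template<typename BytecodeGenerator>
inline void exampleEmitInstanceof(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister value, VirtualRegister prototype)
{
    // Writes the opcode (plus a wide prefix if needed) and three register operands.
    OpInstanceof::emit(gen, dst, value, prototype);
}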
struct OpInstanceofCustom : public Instruction {
static constexpr OpcodeID opcodeID = op_instanceof_custom;
static constexpr size_t length = 5;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister value, VirtualRegister constructor, VirtualRegister hasInstanceValue)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, dst, value, constructor, hasInstanceValue);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister value, VirtualRegister constructor, VirtualRegister hasInstanceValue)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, dst, value, constructor, hasInstanceValue);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister value, VirtualRegister constructor, VirtualRegister hasInstanceValue)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, dst, value, constructor, hasInstanceValue))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, dst, value, constructor, hasInstanceValue))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, dst, value, constructor, hasInstanceValue);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& dst, VirtualRegister& value, VirtualRegister& constructor, VirtualRegister& hasInstanceValue)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<OpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(dst)
&& Fits<VirtualRegister, __size>::check(value)
&& Fits<VirtualRegister, __size>::check(constructor)
&& Fits<VirtualRegister, __size>::check(hasInstanceValue)
&& (__size == OpcodeSize::Wide16 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister value, VirtualRegister constructor, VirtualRegister hasInstanceValue)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, dst, value, constructor, hasInstanceValue)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide32));
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(dst));
gen->write(Fits<VirtualRegister, __size>::convert(value));
gen->write(Fits<VirtualRegister, __size>::convert(constructor));
gen->write(Fits<VirtualRegister, __size>::convert(hasInstanceValue));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**instanceof_custom"[2 - __sizeShiftAmount]);
dumper->dumpOperand("dst", m_dst, true);
dumper->dumpOperand("value", m_value, false);
dumper->dumpOperand("constructor", m_constructor, false);
dumper->dumpOperand("hasInstanceValue", m_hasInstanceValue, false);
}
OpInstanceofCustom(const uint8_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_value(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[1]))
, m_constructor(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[2]))
, m_hasInstanceValue(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[3]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpInstanceofCustom(const uint16_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_value(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[1]))
, m_constructor(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[2]))
, m_hasInstanceValue(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[3]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpInstanceofCustom(const uint32_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_value(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[1]))
, m_constructor(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[2]))
, m_hasInstanceValue(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[3]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static OpInstanceofCustom decode(const uint8_t* stream)
{
// `stream` points at the first byte of the instruction (the opcode, or an op_wide16/op_wide32 prefix); it is advanced past the opcode and prefix so the constructor receives a pointer to the first operand.
if (*stream == op_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == op_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setDst<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setDst<OpcodeSize::Wide16>(value, func);
else
setDst<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setValue(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setValue<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setValue<OpcodeSize::Wide16>(value, func);
else
setValue<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setValue(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setConstructor(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setConstructor<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setConstructor<OpcodeSize::Wide16>(value, func);
else
setConstructor<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setConstructor(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 2 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setHasInstanceValue(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setHasInstanceValue<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setHasInstanceValue<OpcodeSize::Wide16>(value, func);
else
setHasInstanceValue<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setHasInstanceValue(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 3 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
VirtualRegister m_dst;
VirtualRegister m_value;
VirtualRegister m_constructor;
VirtualRegister m_hasInstanceValue;
};
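// Illustrative sketch, not generated: decoding back out of the instruction
// stream. `pc` is assumed to point at the first byte of the instruction,
// i.e. the opcode byte or an op_wide16/op_wide32 prefix; decode() selects the
// constructor matching the encoded width.
inline VirtualRegister exampleInstanceofCustomDst(const uint8_t* pc)
{
    OpInstanceofCustom bytecode = OpInstanceofCustom::decode(pc);
    // Operands are widened to their in-memory types (VirtualRegister here).
    return bytecode.m_dst;
}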
struct OpTypeof : public Instruction {
static constexpr OpcodeID opcodeID = op_typeof;
static constexpr size_t length = 3;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister value)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, dst, value);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister value)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, dst, value);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister value)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, dst, value))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, dst, value))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, dst, value);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& dst, VirtualRegister& value)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<OpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(dst)
&& Fits<VirtualRegister, __size>::check(value)
&& (__size == OpcodeSize::Wide16 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister value)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, dst, value)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide32));
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(dst));
gen->write(Fits<VirtualRegister, __size>::convert(value));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**typeof"[2 - __sizeShiftAmount]);
dumper->dumpOperand("dst", m_dst, true);
dumper->dumpOperand("value", m_value, false);
}
OpTypeof(const uint8_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_value(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[1]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpTypeof(const uint16_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_value(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[1]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpTypeof(const uint32_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_value(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[1]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static OpTypeof decode(const uint8_t* stream)
{
// `stream` points at the first byte of the instruction (the opcode, or an op_wide16/op_wide32 prefix); it is advanced past the opcode and prefix so the constructor receives a pointer to the first operand.
if (*stream == op_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == op_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setDst<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setDst<OpcodeSize::Wide16>(value, func);
else
setDst<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setValue(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setValue<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setValue<OpcodeSize::Wide16>(value, func);
else
setValue<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setValue(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
VirtualRegister m_dst;
VirtualRegister m_value;
};
struct OpIsCellWithType : public Instruction {
static constexpr OpcodeID opcodeID = op_is_cell_with_type;
static constexpr size_t length = 4;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister operand, JSType type)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, dst, operand, type);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister operand, JSType type)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, dst, operand, type);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister operand, JSType type)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, dst, operand, type))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, dst, operand, type))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, dst, operand, type);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& dst, VirtualRegister& operand, JSType& type)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<OpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(dst)
&& Fits<VirtualRegister, __size>::check(operand)
&& Fits<JSType, __size>::check(type)
&& (__size == OpcodeSize::Wide16 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister operand, JSType type)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, dst, operand, type)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide32));
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(dst));
gen->write(Fits<VirtualRegister, __size>::convert(operand));
gen->write(Fits<JSType, __size>::convert(type));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**is_cell_with_type"[2 - __sizeShiftAmount]);
dumper->dumpOperand("dst", m_dst, true);
dumper->dumpOperand("operand", m_operand, false);
dumper->dumpOperand("type", m_type, false);
}
OpIsCellWithType(const uint8_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_operand(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[1]))
, m_type(Fits<JSType, OpcodeSize::Narrow>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpIsCellWithType(const uint16_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_operand(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[1]))
, m_type(Fits<JSType, OpcodeSize::Wide16>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpIsCellWithType(const uint32_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_operand(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[1]))
, m_type(Fits<JSType, OpcodeSize::Wide32>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static OpIsCellWithType decode(const uint8_t* stream)
{
// `stream` points at the first byte of the instruction (the opcode, or an op_wide16/op_wide32 prefix); it is advanced past the opcode and prefix so the constructor receives a pointer to the first operand.
if (*stream == op_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == op_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setDst<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setDst<OpcodeSize::Wide16>(value, func);
else
setDst<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setOperand(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setOperand<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setOperand<OpcodeSize::Wide16>(value, func);
else
setOperand<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setOperand(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setType(JSType value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setType<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setType<OpcodeSize::Wide16>(value, func);
else
setType<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setType(JSType value, Functor func)
{
if (!Fits<JSType, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 2 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<JSType, size>::convert(value);
}
VirtualRegister m_dst;
VirtualRegister m_operand;
JSType m_type;
};
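// Illustrative sketch, not generated: a non-register operand. JSType is a
// one-byte enum, so the type operand always fits even in the Narrow encoding;
// only the register operands can force the instruction to widen. ArrayType is
// assumed here to be an available JSType enumerator, used for concreteness.
template<typename BytecodeGenerator>
inline void exampleEmitIsArray(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister operand)
{
    OpIsCellWithType::emit(gen, dst, operand, ArrayType);
}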
struct OpInById : public Instruction {
static constexpr OpcodeID opcodeID = op_in_by_id;
static constexpr size_t length = 4;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister base, unsigned property)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, dst, base, property);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister base, unsigned property)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, dst, base, property);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister base, unsigned property)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, dst, base, property))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, dst, base, property))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, dst, base, property);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& dst, VirtualRegister& base, unsigned& property)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<OpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(dst)
&& Fits<VirtualRegister, __size>::check(base)
&& Fits<unsigned, __size>::check(property)
&& (__size == OpcodeSize::Wide16 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister base, unsigned property)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, dst, base, property)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide32));
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(dst));
gen->write(Fits<VirtualRegister, __size>::convert(base));
gen->write(Fits<unsigned, __size>::convert(property));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**in_by_id"[2 - __sizeShiftAmount]);
dumper->dumpOperand("dst", m_dst, true);
dumper->dumpOperand("base", m_base, false);
dumper->dumpOperand("property", m_property, false);
}
OpInById(const uint8_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_base(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[1]))
, m_property(Fits<unsigned, OpcodeSize::Narrow>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpInById(const uint16_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_base(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[1]))
, m_property(Fits<unsigned, OpcodeSize::Wide16>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpInById(const uint32_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_base(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[1]))
, m_property(Fits<unsigned, OpcodeSize::Wide32>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static OpInById decode(const uint8_t* stream)
{
// `stream` points at the first byte of the instruction (the opcode, or an op_wide16/op_wide32 prefix); it is advanced past the opcode and prefix so the constructor receives a pointer to the first operand.
if (*stream == op_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == op_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setDst<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setDst<OpcodeSize::Wide16>(value, func);
else
setDst<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setBase(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setBase<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setBase<OpcodeSize::Wide16>(value, func);
else
setBase<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setBase(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setProperty(unsigned value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setProperty<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setProperty<OpcodeSize::Wide16>(value, func);
else
setProperty<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setProperty(unsigned value, Functor func)
{
if (!Fits<unsigned, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 2 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<unsigned, size>::convert(value);
}
VirtualRegister m_dst;
VirtualRegister m_base;
unsigned m_property;
};
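// Illustrative sketch, not generated: patching an operand of an
// already-emitted instruction in place. The functor passed to setDst() runs
// only if the new value does not fit the instruction's existing width; here
// it returns an assumed caller-provided fallback register.
inline void exampleRepatchInByIdDst(OpInById& bytecode, VirtualRegister newDst, VirtualRegister fallback)
{
    bytecode.setDst(newDst, [&]() -> VirtualRegister {
        // Reached only when newDst fails Fits<VirtualRegister, size>::check().
        return fallback;
    });
}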
struct OpPutByIdWithThis : public Instruction {
static constexpr OpcodeID opcodeID = op_put_by_id_with_this;
static constexpr size_t length = 6;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister base, VirtualRegister thisValue, unsigned property, VirtualRegister value, ECMAMode ecmaMode)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, base, thisValue, property, value, ecmaMode);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister base, VirtualRegister thisValue, unsigned property, VirtualRegister value, ECMAMode ecmaMode)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, base, thisValue, property, value, ecmaMode);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister base, VirtualRegister thisValue, unsigned property, VirtualRegister value, ECMAMode ecmaMode)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, base, thisValue, property, value, ecmaMode))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, base, thisValue, property, value, ecmaMode))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, base, thisValue, property, value, ecmaMode);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& base, VirtualRegister& thisValue, unsigned& property, VirtualRegister& value, ECMAMode& ecmaMode)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<OpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(base)
&& Fits<VirtualRegister, __size>::check(thisValue)
&& Fits<unsigned, __size>::check(property)
&& Fits<VirtualRegister, __size>::check(value)
&& Fits<ECMAMode, __size>::check(ecmaMode)
&& (__size == OpcodeSize::Wide16 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister base, VirtualRegister thisValue, unsigned property, VirtualRegister value, ECMAMode ecmaMode)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, base, thisValue, property, value, ecmaMode)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide32));
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(base));
gen->write(Fits<VirtualRegister, __size>::convert(thisValue));
gen->write(Fits<unsigned, __size>::convert(property));
gen->write(Fits<VirtualRegister, __size>::convert(value));
gen->write(Fits<ECMAMode, __size>::convert(ecmaMode));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**put_by_id_with_this"[2 - __sizeShiftAmount]);
dumper->dumpOperand("base", m_base, true);
dumper->dumpOperand("thisValue", m_thisValue, false);
dumper->dumpOperand("property", m_property, false);
dumper->dumpOperand("value", m_value, false);
dumper->dumpOperand("ecmaMode", m_ecmaMode, false);
}
OpPutByIdWithThis(const uint8_t* stream)
: m_base(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_thisValue(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[1]))
, m_property(Fits<unsigned, OpcodeSize::Narrow>::convert(stream[2]))
, m_value(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[3]))
, m_ecmaMode(Fits<ECMAMode, OpcodeSize::Narrow>::convert(stream[4]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpPutByIdWithThis(const uint16_t* stream)
: m_base(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_thisValue(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[1]))
, m_property(Fits<unsigned, OpcodeSize::Wide16>::convert(stream[2]))
, m_value(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[3]))
, m_ecmaMode(Fits<ECMAMode, OpcodeSize::Wide16>::convert(stream[4]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpPutByIdWithThis(const uint32_t* stream)
: m_base(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_thisValue(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[1]))
, m_property(Fits<unsigned, OpcodeSize::Wide32>::convert(stream[2]))
, m_value(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[3]))
, m_ecmaMode(Fits<ECMAMode, OpcodeSize::Wide32>::convert(stream[4]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static OpPutByIdWithThis decode(const uint8_t* stream)
{
// `stream` points at the first byte of the instruction (the opcode, or an op_wide16/op_wide32 prefix); it is advanced past the opcode and prefix so the constructor receives a pointer to the first operand.
if (*stream == op_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == op_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setBase(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setBase<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setBase<OpcodeSize::Wide16>(value, func);
else
setBase<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setBase(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setThisValue(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setThisValue<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setThisValue<OpcodeSize::Wide16>(value, func);
else
setThisValue<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setThisValue(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setProperty(unsigned value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setProperty<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setProperty<OpcodeSize::Wide16>(value, func);
else
setProperty<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setProperty(unsigned value, Functor func)
{
if (!Fits<unsigned, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 2 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<unsigned, size>::convert(value);
}
template<typename Functor>
void setValue(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setValue<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setValue<OpcodeSize::Wide16>(value, func);
else
setValue<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setValue(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 3 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setEcmaMode(ECMAMode value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setEcmaMode<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setEcmaMode<OpcodeSize::Wide16>(value, func);
else
setEcmaMode<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setEcmaMode(ECMAMode value, Functor func)
{
if (!Fits<ECMAMode, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 4 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<ECMAMode, size>::convert(value);
}
VirtualRegister m_base;
VirtualRegister m_thisValue;
unsigned m_property;
VirtualRegister m_value;
ECMAMode m_ecmaMode;
};
struct OpDelById : public Instruction {
static constexpr OpcodeID opcodeID = op_del_by_id;
static constexpr size_t length = 5;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister base, unsigned property, ECMAMode ecmaMode)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, dst, base, property, ecmaMode);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister base, unsigned property, ECMAMode ecmaMode)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, dst, base, property, ecmaMode);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister base, unsigned property, ECMAMode ecmaMode)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, dst, base, property, ecmaMode))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, dst, base, property, ecmaMode))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, dst, base, property, ecmaMode);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& dst, VirtualRegister& base, unsigned& property, ECMAMode& ecmaMode)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<OpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(dst)
&& Fits<VirtualRegister, __size>::check(base)
&& Fits<unsigned, __size>::check(property)
&& Fits<ECMAMode, __size>::check(ecmaMode)
&& (__size == OpcodeSize::Wide16 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister base, unsigned property, ECMAMode ecmaMode)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, dst, base, property, ecmaMode)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide32));
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(dst));
gen->write(Fits<VirtualRegister, __size>::convert(base));
gen->write(Fits<unsigned, __size>::convert(property));
gen->write(Fits<ECMAMode, __size>::convert(ecmaMode));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**del_by_id"[2 - __sizeShiftAmount]);
dumper->dumpOperand("dst", m_dst, true);
dumper->dumpOperand("base", m_base, false);
dumper->dumpOperand("property", m_property, false);
dumper->dumpOperand("ecmaMode", m_ecmaMode, false);
}
OpDelById(const uint8_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_base(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[1]))
, m_property(Fits<unsigned, OpcodeSize::Narrow>::convert(stream[2]))
, m_ecmaMode(Fits<ECMAMode, OpcodeSize::Narrow>::convert(stream[3]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpDelById(const uint16_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_base(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[1]))
, m_property(Fits<unsigned, OpcodeSize::Wide16>::convert(stream[2]))
, m_ecmaMode(Fits<ECMAMode, OpcodeSize::Wide16>::convert(stream[3]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpDelById(const uint32_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_base(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[1]))
, m_property(Fits<unsigned, OpcodeSize::Wide32>::convert(stream[2]))
, m_ecmaMode(Fits<ECMAMode, OpcodeSize::Wide32>::convert(stream[3]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static OpDelById decode(const uint8_t* stream)
{
// `stream` points at the first byte of the instruction (the opcode, or an op_wide16/op_wide32 prefix); it is advanced past the opcode and prefix so the constructor receives a pointer to the first operand.
if (*stream == op_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == op_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setDst<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setDst<OpcodeSize::Wide16>(value, func);
else
setDst<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setBase(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setBase<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setBase<OpcodeSize::Wide16>(value, func);
else
setBase<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setBase(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setProperty(unsigned value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setProperty<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setProperty<OpcodeSize::Wide16>(value, func);
else
setProperty<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setProperty(unsigned value, Functor func)
{
if (!Fits<unsigned, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 2 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<unsigned, size>::convert(value);
}
template<typename Functor>
void setEcmaMode(ECMAMode value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setEcmaMode<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setEcmaMode<OpcodeSize::Wide16>(value, func);
else
setEcmaMode<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setEcmaMode(ECMAMode value, Functor func)
{
if (!Fits<ECMAMode, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 3 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<ECMAMode, size>::convert(value);
}
VirtualRegister m_dst;
VirtualRegister m_base;
unsigned m_property;
ECMAMode m_ecmaMode;
};
struct OpPutByValWithThis : public Instruction {
static constexpr OpcodeID opcodeID = op_put_by_val_with_this;
static constexpr size_t length = 6;
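    // length is the instruction's size in encoding-width units: one slot for the
    // opcode plus one per operand.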
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister base, VirtualRegister thisValue, VirtualRegister property, VirtualRegister value, ECMAMode ecmaMode)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, base, thisValue, property, value, ecmaMode);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister base, VirtualRegister thisValue, VirtualRegister property, VirtualRegister value, ECMAMode ecmaMode)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, base, thisValue, property, value, ecmaMode);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister base, VirtualRegister thisValue, VirtualRegister property, VirtualRegister value, ECMAMode ecmaMode)
{
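        // Try the smallest encoding whose operands all fit: narrow first, then
        // wide16; the wide32 fallback asserts because it must always succeed.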
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, base, thisValue, property, value, ecmaMode))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, base, thisValue, property, value, ecmaMode))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, base, thisValue, property, value, ecmaMode);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& base, VirtualRegister& thisValue, VirtualRegister& property, VirtualRegister& value, ECMAMode& ecmaMode)
{
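        // An encoding is viable only if the opcode ID, every operand, and (for
        // wide forms) the prefix byte itself all fit at the requested width.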
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<OpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(base)
&& Fits<VirtualRegister, __size>::check(thisValue)
&& Fits<VirtualRegister, __size>::check(property)
&& Fits<VirtualRegister, __size>::check(value)
&& Fits<ECMAMode, __size>::check(ecmaMode)
&& (__size == OpcodeSize::Wide16 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister base, VirtualRegister thisValue, VirtualRegister property, VirtualRegister value, ECMAMode ecmaMode)
{
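        // Wide encodings are aligned up front so the prefixed opcode lands where
        // the interpreter expects it, before the operands are checked and written.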
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, base, thisValue, property, value, ecmaMode)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide32));
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(base));
gen->write(Fits<VirtualRegister, __size>::convert(thisValue));
gen->write(Fits<VirtualRegister, __size>::convert(property));
gen->write(Fits<VirtualRegister, __size>::convert(value));
gen->write(Fits<ECMAMode, __size>::convert(ecmaMode));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**put_by_val_with_this"[2 - __sizeShiftAmount]);
dumper->dumpOperand("base", m_base, true);
dumper->dumpOperand("thisValue", m_thisValue, false);
dumper->dumpOperand("property", m_property, false);
dumper->dumpOperand("value", m_value, false);
dumper->dumpOperand("ecmaMode", m_ecmaMode, false);
}
OpPutByValWithThis(const uint8_t* stream)
: m_base(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_thisValue(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[1]))
, m_property(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[2]))
, m_value(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[3]))
, m_ecmaMode(Fits<ECMAMode, OpcodeSize::Narrow>::convert(stream[4]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpPutByValWithThis(const uint16_t* stream)
: m_base(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_thisValue(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[1]))
, m_property(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[2]))
, m_value(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[3]))
, m_ecmaMode(Fits<ECMAMode, OpcodeSize::Wide16>::convert(stream[4]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpPutByValWithThis(const uint32_t* stream)
: m_base(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_thisValue(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[1]))
, m_property(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[2]))
, m_value(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[3]))
, m_ecmaMode(Fits<ECMAMode, OpcodeSize::Wide32>::convert(stream[4]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static OpPutByValWithThis decode(const uint8_t* stream)
{
        // stream points at the prefix or opcode byte; skip the opcode and any wide
        // prefix so the constructor receives a pointer to the first operand.
if (*stream == op_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == op_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setBase(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setBase<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setBase<OpcodeSize::Wide16>(value, func);
else
setBase<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setBase(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setThisValue(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setThisValue<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setThisValue<OpcodeSize::Wide16>(value, func);
else
setThisValue<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setThisValue(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setProperty(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setProperty<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setProperty<OpcodeSize::Wide16>(value, func);
else
setProperty<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setProperty(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 2 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setValue(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setValue<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setValue<OpcodeSize::Wide16>(value, func);
else
setValue<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setValue(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 3 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setEcmaMode(ECMAMode value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setEcmaMode<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setEcmaMode<OpcodeSize::Wide16>(value, func);
else
setEcmaMode<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setEcmaMode(ECMAMode value, Functor func)
{
if (!Fits<ECMAMode, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 4 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<ECMAMode, size>::convert(value);
}
VirtualRegister m_base;
VirtualRegister m_thisValue;
VirtualRegister m_property;
VirtualRegister m_value;
ECMAMode m_ecmaMode;
};
struct OpDelByVal : public Instruction {
static constexpr OpcodeID opcodeID = op_del_by_val;
static constexpr size_t length = 5;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister base, VirtualRegister property, ECMAMode ecmaMode)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, dst, base, property, ecmaMode);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister base, VirtualRegister property, ECMAMode ecmaMode)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, dst, base, property, ecmaMode);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister base, VirtualRegister property, ECMAMode ecmaMode)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, dst, base, property, ecmaMode))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, dst, base, property, ecmaMode))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, dst, base, property, ecmaMode);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& dst, VirtualRegister& base, VirtualRegister& property, ECMAMode& ecmaMode)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<OpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(dst)
&& Fits<VirtualRegister, __size>::check(base)
&& Fits<VirtualRegister, __size>::check(property)
&& Fits<ECMAMode, __size>::check(ecmaMode)
&& (__size == OpcodeSize::Wide16 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister base, VirtualRegister property, ECMAMode ecmaMode)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, dst, base, property, ecmaMode)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide32));
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(dst));
gen->write(Fits<VirtualRegister, __size>::convert(base));
gen->write(Fits<VirtualRegister, __size>::convert(property));
gen->write(Fits<ECMAMode, __size>::convert(ecmaMode));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**del_by_val"[2 - __sizeShiftAmount]);
dumper->dumpOperand("dst", m_dst, true);
dumper->dumpOperand("base", m_base, false);
dumper->dumpOperand("property", m_property, false);
dumper->dumpOperand("ecmaMode", m_ecmaMode, false);
}
OpDelByVal(const uint8_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_base(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[1]))
, m_property(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[2]))
, m_ecmaMode(Fits<ECMAMode, OpcodeSize::Narrow>::convert(stream[3]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpDelByVal(const uint16_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_base(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[1]))
, m_property(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[2]))
, m_ecmaMode(Fits<ECMAMode, OpcodeSize::Wide16>::convert(stream[3]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpDelByVal(const uint32_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_base(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[1]))
, m_property(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[2]))
, m_ecmaMode(Fits<ECMAMode, OpcodeSize::Wide32>::convert(stream[3]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static OpDelByVal decode(const uint8_t* stream)
{
        // stream points at the prefix or opcode byte; skip the opcode and any wide
        // prefix so the constructor receives a pointer to the first operand.
if (*stream == op_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == op_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setDst<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setDst<OpcodeSize::Wide16>(value, func);
else
setDst<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setBase(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setBase<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setBase<OpcodeSize::Wide16>(value, func);
else
setBase<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setBase(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setProperty(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setProperty<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setProperty<OpcodeSize::Wide16>(value, func);
else
setProperty<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setProperty(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 2 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setEcmaMode(ECMAMode value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setEcmaMode<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setEcmaMode<OpcodeSize::Wide16>(value, func);
else
setEcmaMode<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setEcmaMode(ECMAMode value, Functor func)
{
if (!Fits<ECMAMode, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 3 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<ECMAMode, size>::convert(value);
}
VirtualRegister m_dst;
VirtualRegister m_base;
VirtualRegister m_property;
ECMAMode m_ecmaMode;
};
struct OpPutGetterById : public Instruction {
static constexpr OpcodeID opcodeID = op_put_getter_by_id;
static constexpr size_t length = 5;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister base, unsigned property, unsigned attributes, VirtualRegister accessor)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, base, property, attributes, accessor);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister base, unsigned property, unsigned attributes, VirtualRegister accessor)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, base, property, attributes, accessor);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister base, unsigned property, unsigned attributes, VirtualRegister accessor)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, base, property, attributes, accessor))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, base, property, attributes, accessor))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, base, property, attributes, accessor);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& base, unsigned& property, unsigned& attributes, VirtualRegister& accessor)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<OpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(base)
&& Fits<unsigned, __size>::check(property)
&& Fits<unsigned, __size>::check(attributes)
&& Fits<VirtualRegister, __size>::check(accessor)
&& (__size == OpcodeSize::Wide16 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister base, unsigned property, unsigned attributes, VirtualRegister accessor)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, base, property, attributes, accessor)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide32));
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(base));
gen->write(Fits<unsigned, __size>::convert(property));
gen->write(Fits<unsigned, __size>::convert(attributes));
gen->write(Fits<VirtualRegister, __size>::convert(accessor));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**put_getter_by_id"[2 - __sizeShiftAmount]);
dumper->dumpOperand("base", m_base, true);
dumper->dumpOperand("property", m_property, false);
dumper->dumpOperand("attributes", m_attributes, false);
dumper->dumpOperand("accessor", m_accessor, false);
}
OpPutGetterById(const uint8_t* stream)
: m_base(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_property(Fits<unsigned, OpcodeSize::Narrow>::convert(stream[1]))
, m_attributes(Fits<unsigned, OpcodeSize::Narrow>::convert(stream[2]))
, m_accessor(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[3]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpPutGetterById(const uint16_t* stream)
: m_base(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_property(Fits<unsigned, OpcodeSize::Wide16>::convert(stream[1]))
, m_attributes(Fits<unsigned, OpcodeSize::Wide16>::convert(stream[2]))
, m_accessor(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[3]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpPutGetterById(const uint32_t* stream)
: m_base(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_property(Fits<unsigned, OpcodeSize::Wide32>::convert(stream[1]))
, m_attributes(Fits<unsigned, OpcodeSize::Wide32>::convert(stream[2]))
, m_accessor(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[3]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static OpPutGetterById decode(const uint8_t* stream)
{
        // stream points at the prefix or opcode byte; skip the opcode and any wide
        // prefix so the constructor receives a pointer to the first operand.
if (*stream == op_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == op_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setBase(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setBase<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setBase<OpcodeSize::Wide16>(value, func);
else
setBase<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setBase(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setProperty(unsigned value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setProperty<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setProperty<OpcodeSize::Wide16>(value, func);
else
setProperty<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setProperty(unsigned value, Functor func)
{
if (!Fits<unsigned, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<unsigned, size>::convert(value);
}
template<typename Functor>
void setAttributes(unsigned value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setAttributes<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setAttributes<OpcodeSize::Wide16>(value, func);
else
setAttributes<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setAttributes(unsigned value, Functor func)
{
if (!Fits<unsigned, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 2 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<unsigned, size>::convert(value);
}
template<typename Functor>
void setAccessor(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setAccessor<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setAccessor<OpcodeSize::Wide16>(value, func);
else
setAccessor<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setAccessor(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 3 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
VirtualRegister m_base;
unsigned m_property;
unsigned m_attributes;
VirtualRegister m_accessor;
};
struct OpPutSetterById : public Instruction {
static constexpr OpcodeID opcodeID = op_put_setter_by_id;
static constexpr size_t length = 5;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister base, unsigned property, unsigned attributes, VirtualRegister accessor)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, base, property, attributes, accessor);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister base, unsigned property, unsigned attributes, VirtualRegister accessor)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, base, property, attributes, accessor);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister base, unsigned property, unsigned attributes, VirtualRegister accessor)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, base, property, attributes, accessor))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, base, property, attributes, accessor))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, base, property, attributes, accessor);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& base, unsigned& property, unsigned& attributes, VirtualRegister& accessor)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<OpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(base)
&& Fits<unsigned, __size>::check(property)
&& Fits<unsigned, __size>::check(attributes)
&& Fits<VirtualRegister, __size>::check(accessor)
&& (__size == OpcodeSize::Wide16 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister base, unsigned property, unsigned attributes, VirtualRegister accessor)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, base, property, attributes, accessor)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide32));
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(base));
gen->write(Fits<unsigned, __size>::convert(property));
gen->write(Fits<unsigned, __size>::convert(attributes));
gen->write(Fits<VirtualRegister, __size>::convert(accessor));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**put_setter_by_id"[2 - __sizeShiftAmount]);
dumper->dumpOperand("base", m_base, true);
dumper->dumpOperand("property", m_property, false);
dumper->dumpOperand("attributes", m_attributes, false);
dumper->dumpOperand("accessor", m_accessor, false);
}
OpPutSetterById(const uint8_t* stream)
: m_base(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_property(Fits<unsigned, OpcodeSize::Narrow>::convert(stream[1]))
, m_attributes(Fits<unsigned, OpcodeSize::Narrow>::convert(stream[2]))
, m_accessor(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[3]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpPutSetterById(const uint16_t* stream)
: m_base(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_property(Fits<unsigned, OpcodeSize::Wide16>::convert(stream[1]))
, m_attributes(Fits<unsigned, OpcodeSize::Wide16>::convert(stream[2]))
, m_accessor(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[3]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpPutSetterById(const uint32_t* stream)
: m_base(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_property(Fits<unsigned, OpcodeSize::Wide32>::convert(stream[1]))
, m_attributes(Fits<unsigned, OpcodeSize::Wide32>::convert(stream[2]))
, m_accessor(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[3]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static OpPutSetterById decode(const uint8_t* stream)
{
        // stream points at the prefix or opcode byte; skip the opcode and any wide
        // prefix so the constructor receives a pointer to the first operand.
if (*stream == op_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == op_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setBase(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setBase<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setBase<OpcodeSize::Wide16>(value, func);
else
setBase<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setBase(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setProperty(unsigned value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setProperty<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setProperty<OpcodeSize::Wide16>(value, func);
else
setProperty<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setProperty(unsigned value, Functor func)
{
if (!Fits<unsigned, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<unsigned, size>::convert(value);
}
template<typename Functor>
void setAttributes(unsigned value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setAttributes<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setAttributes<OpcodeSize::Wide16>(value, func);
else
setAttributes<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setAttributes(unsigned value, Functor func)
{
if (!Fits<unsigned, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 2 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<unsigned, size>::convert(value);
}
template<typename Functor>
void setAccessor(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setAccessor<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setAccessor<OpcodeSize::Wide16>(value, func);
else
setAccessor<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setAccessor(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 3 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
VirtualRegister m_base;
unsigned m_property;
unsigned m_attributes;
VirtualRegister m_accessor;
};
struct OpPutGetterSetterById : public Instruction {
static constexpr OpcodeID opcodeID = op_put_getter_setter_by_id;
static constexpr size_t length = 6;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister base, unsigned property, unsigned attributes, VirtualRegister getter, VirtualRegister setter)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, base, property, attributes, getter, setter);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister base, unsigned property, unsigned attributes, VirtualRegister getter, VirtualRegister setter)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, base, property, attributes, getter, setter);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister base, unsigned property, unsigned attributes, VirtualRegister getter, VirtualRegister setter)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, base, property, attributes, getter, setter))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, base, property, attributes, getter, setter))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, base, property, attributes, getter, setter);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& base, unsigned& property, unsigned& attributes, VirtualRegister& getter, VirtualRegister& setter)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<OpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(base)
&& Fits<unsigned, __size>::check(property)
&& Fits<unsigned, __size>::check(attributes)
&& Fits<VirtualRegister, __size>::check(getter)
&& Fits<VirtualRegister, __size>::check(setter)
&& (__size == OpcodeSize::Wide16 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister base, unsigned property, unsigned attributes, VirtualRegister getter, VirtualRegister setter)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, base, property, attributes, getter, setter)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide32));
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(base));
gen->write(Fits<unsigned, __size>::convert(property));
gen->write(Fits<unsigned, __size>::convert(attributes));
gen->write(Fits<VirtualRegister, __size>::convert(getter));
gen->write(Fits<VirtualRegister, __size>::convert(setter));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**put_getter_setter_by_id"[2 - __sizeShiftAmount]);
dumper->dumpOperand("base", m_base, true);
dumper->dumpOperand("property", m_property, false);
dumper->dumpOperand("attributes", m_attributes, false);
dumper->dumpOperand("getter", m_getter, false);
dumper->dumpOperand("setter", m_setter, false);
}
OpPutGetterSetterById(const uint8_t* stream)
: m_base(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_property(Fits<unsigned, OpcodeSize::Narrow>::convert(stream[1]))
, m_attributes(Fits<unsigned, OpcodeSize::Narrow>::convert(stream[2]))
, m_getter(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[3]))
, m_setter(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[4]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpPutGetterSetterById(const uint16_t* stream)
: m_base(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_property(Fits<unsigned, OpcodeSize::Wide16>::convert(stream[1]))
, m_attributes(Fits<unsigned, OpcodeSize::Wide16>::convert(stream[2]))
, m_getter(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[3]))
, m_setter(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[4]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpPutGetterSetterById(const uint32_t* stream)
: m_base(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_property(Fits<unsigned, OpcodeSize::Wide32>::convert(stream[1]))
, m_attributes(Fits<unsigned, OpcodeSize::Wide32>::convert(stream[2]))
, m_getter(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[3]))
, m_setter(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[4]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static OpPutGetterSetterById decode(const uint8_t* stream)
{
        // stream points at the prefix or opcode byte; skip the opcode and any wide
        // prefix so the constructor receives a pointer to the first operand.
if (*stream == op_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == op_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setBase(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setBase<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setBase<OpcodeSize::Wide16>(value, func);
else
setBase<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setBase(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setProperty(unsigned value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setProperty<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setProperty<OpcodeSize::Wide16>(value, func);
else
setProperty<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setProperty(unsigned value, Functor func)
{
if (!Fits<unsigned, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<unsigned, size>::convert(value);
}
template<typename Functor>
void setAttributes(unsigned value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setAttributes<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setAttributes<OpcodeSize::Wide16>(value, func);
else
setAttributes<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setAttributes(unsigned value, Functor func)
{
if (!Fits<unsigned, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 2 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<unsigned, size>::convert(value);
}
template<typename Functor>
void setGetter(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setGetter<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setGetter<OpcodeSize::Wide16>(value, func);
else
setGetter<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setGetter(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 3 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setSetter(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setSetter<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setSetter<OpcodeSize::Wide16>(value, func);
else
setSetter<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setSetter(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 4 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
VirtualRegister m_base;
unsigned m_property;
unsigned m_attributes;
VirtualRegister m_getter;
VirtualRegister m_setter;
};
struct OpPutGetterByVal : public Instruction {
static constexpr OpcodeID opcodeID = op_put_getter_by_val;
static constexpr size_t length = 5;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister base, VirtualRegister property, unsigned attributes, VirtualRegister accessor)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, base, property, attributes, accessor);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister base, VirtualRegister property, unsigned attributes, VirtualRegister accessor)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, base, property, attributes, accessor);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister base, VirtualRegister property, unsigned attributes, VirtualRegister accessor)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, base, property, attributes, accessor))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, base, property, attributes, accessor))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, base, property, attributes, accessor);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& base, VirtualRegister& property, unsigned& attributes, VirtualRegister& accessor)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<OpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(base)
&& Fits<VirtualRegister, __size>::check(property)
&& Fits<unsigned, __size>::check(attributes)
&& Fits<VirtualRegister, __size>::check(accessor)
&& (__size == OpcodeSize::Wide16 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<OpcodeID, OpcodeSize::Narrow>::check(op_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister base, VirtualRegister property, unsigned attributes, VirtualRegister accessor)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, base, property, attributes, accessor)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(op_wide32));
gen->write(Fits<OpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(base));
gen->write(Fits<VirtualRegister, __size>::convert(property));
gen->write(Fits<unsigned, __size>::convert(attributes));
gen->write(Fits<VirtualRegister, __size>::convert(accessor));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**put_getter_by_val"[2 - __sizeShiftAmount]);
dumper->dumpOperand("base", m_base, true);
dumper->dumpOperand("property", m_property, false);
dumper->dumpOperand("attributes", m_attributes, false);
dumper->dumpOperand("accessor", m_accessor, false);
}
OpPutGetterByVal(const uint8_t* stream)
: m_base(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_property(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[1]))
, m_attributes(Fits<unsigned, OpcodeSize::Narrow>::convert(stream[2]))
, m_accessor(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[3]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpPutGetterByVal(const uint16_t* stream)
: m_base(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_property(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[1]))
, m_attributes(Fits<unsigned, OpcodeSize::Wide16>::convert(stream[2]))
, m_accessor(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[3]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
OpPutGetterByVal(const uint32_t* stream)
: m_base(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_property(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[1]))
, m_attributes(Fits<unsigned, OpcodeSize::Wide32>::convert(stream[2]))
, m_accessor(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[3]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static OpPutGetterByVal decode(const uint8_t* stream)
{
        // stream points at the prefix or opcode byte; skip the opcode and any wide
        // prefix so the constructor receives a pointer to the first operand.
if (*stream == op_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == op_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setBase(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setBase<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setBase<OpcodeSize::Wide16>(value, func);
else
setBase<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setBase(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setProperty(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setProperty<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setProperty<OpcodeSize::Wide16>(value, func);
else
setProperty<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setProperty(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setAttributes(unsigned value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setAttributes<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setAttributes<OpcodeSize::Wide16>(value, func);
else
setAttributes<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setAttributes(unsigned value, Functor func)
{
if (!Fits<unsigned, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 2 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<unsigned, size>::convert(value);
}
template<typename Functor>
void setAccessor(VirtualRegister value, Functor func)
{
if (isWide32<JSOpcodeTraits>())
setAccessor<OpcodeSize::Wide32>(value, func);
else if (isWide16<JSOpcodeTraits>())
setAccessor<OpcodeSize::Wide16>(value, func);
else
setAccessor<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setAccessor(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 3 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
VirtualRegister m_base;
VirtualRegister m_property;
unsigned m_attributes;
VirtualRegister m_accessor;
};
#if ENABLE(WEBASSEMBLY)
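// The Wasm opcode structs below mirror the JS opcode template above, keyed off
// WasmOpcodeID and the wasm_wide16/wasm_wide32 prefixes instead.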
struct WasmAtomicFence : public Instruction {
static constexpr WasmOpcodeID opcodeID = wasm_atomic_fence;
static constexpr size_t length = 1;
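    // atomic_fence carries no operands, so its length is one slot for the opcode.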
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<WasmOpcodeID, __size>::check(opcodeID)
&& (__size == OpcodeSize::Wide16 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide32));
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**atomic_fence"[2 - __sizeShiftAmount]);
}
WasmAtomicFence(const uint8_t* stream)
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmAtomicFence(const uint16_t* stream)
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmAtomicFence(const uint32_t* stream)
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static WasmAtomicFence decode(const uint8_t* stream)
{
        // stream points at the prefix or opcode byte; skip the opcode and any wide
        // prefix so the constructor receives a pointer to the first operand.
if (*stream == wasm_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == wasm_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
};
struct WasmI32Sub : public Instruction {
static constexpr WasmOpcodeID opcodeID = wasm_i32_sub;
static constexpr size_t length = 4;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, dst, lhs, rhs);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, dst, lhs, rhs);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, dst, lhs, rhs))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, dst, lhs, rhs))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, dst, lhs, rhs);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& dst, VirtualRegister& lhs, VirtualRegister& rhs)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
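        // The encoding fits only if the opcode and every operand fit in __size
        // and, for wide encodings, the prefix opcode itself fits in a narrow slot.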
return Fits<WasmOpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(dst)
&& Fits<VirtualRegister, __size>::check(lhs)
&& Fits<VirtualRegister, __size>::check(rhs)
&& (__size == OpcodeSize::Wide16 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
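        // Align the stream up front for wide encodings; if the operands fit,
        // write the wide prefix (if any), the opcode byte, and each operand
        // converted to the chosen width.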
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, dst, lhs, rhs)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide32));
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(dst));
gen->write(Fits<VirtualRegister, __size>::convert(lhs));
gen->write(Fits<VirtualRegister, __size>::convert(rhs));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**i32_sub"[2 - __sizeShiftAmount]);
dumper->dumpOperand("dst", m_dst, true);
dumper->dumpOperand("lhs", m_lhs, false);
dumper->dumpOperand("rhs", m_rhs, false);
}
WasmI32Sub(const uint8_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_lhs(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[1]))
, m_rhs(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmI32Sub(const uint16_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_lhs(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[1]))
, m_rhs(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmI32Sub(const uint32_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_lhs(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[1]))
, m_rhs(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static WasmI32Sub decode(const uint8_t* stream)
{
        // The instruction is constructed from a pointer to the first operand (the opcode byte and any wide prefix are skipped).
if (*stream == wasm_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == wasm_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide16>(value, func);
else
setDst<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
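        // Operand 0 starts at byte offset 0 * size past the opcode byte;
        // PaddingBySize<size>::value accounts for the wide prefix byte (0 for narrow).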
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setLhs(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setLhs<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setLhs<OpcodeSize::Wide16>(value, func);
else
setLhs<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setLhs(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setRhs(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setRhs<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setRhs<OpcodeSize::Wide16>(value, func);
else
setRhs<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setRhs(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 2 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
VirtualRegister m_dst;
VirtualRegister m_lhs;
VirtualRegister m_rhs;
};
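// A minimal usage sketch for the structs above (hedged: `gen`, `dst`, `lhs`,
// `rhs`, and `pc` are hypothetical; any type satisfying the generator interface
// used by emitImpl, i.e. write(), recordOpcode(), alignWideOpcode16/32(), works):
//
//     WasmI32Sub::emit(&gen, dst, lhs, rhs);    // picks the smallest encoding that fits
//     WasmI32Sub insn = WasmI32Sub::decode(pc); // pc addresses the wide prefix (or, for narrow, the opcode) byte
//     // insn.m_dst, insn.m_lhs, insn.m_rhs now hold the decoded operands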
struct WasmI32Mul : public Instruction {
static constexpr WasmOpcodeID opcodeID = wasm_i32_mul;
static constexpr size_t length = 4;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, dst, lhs, rhs);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, dst, lhs, rhs);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, dst, lhs, rhs))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, dst, lhs, rhs))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, dst, lhs, rhs);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& dst, VirtualRegister& lhs, VirtualRegister& rhs)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<WasmOpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(dst)
&& Fits<VirtualRegister, __size>::check(lhs)
&& Fits<VirtualRegister, __size>::check(rhs)
&& (__size == OpcodeSize::Wide16 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, dst, lhs, rhs)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide32));
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(dst));
gen->write(Fits<VirtualRegister, __size>::convert(lhs));
gen->write(Fits<VirtualRegister, __size>::convert(rhs));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**i32_mul"[2 - __sizeShiftAmount]);
dumper->dumpOperand("dst", m_dst, true);
dumper->dumpOperand("lhs", m_lhs, false);
dumper->dumpOperand("rhs", m_rhs, false);
}
WasmI32Mul(const uint8_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_lhs(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[1]))
, m_rhs(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmI32Mul(const uint16_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_lhs(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[1]))
, m_rhs(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmI32Mul(const uint32_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_lhs(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[1]))
, m_rhs(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static WasmI32Mul decode(const uint8_t* stream)
{
        // The instruction is constructed from a pointer to the first operand (the opcode byte and any wide prefix are skipped).
if (*stream == wasm_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == wasm_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide16>(value, func);
else
setDst<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setLhs(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setLhs<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setLhs<OpcodeSize::Wide16>(value, func);
else
setLhs<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setLhs(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setRhs(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setRhs<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setRhs<OpcodeSize::Wide16>(value, func);
else
setRhs<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setRhs(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 2 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
VirtualRegister m_dst;
VirtualRegister m_lhs;
VirtualRegister m_rhs;
};
struct WasmI32DivS : public Instruction {
static constexpr WasmOpcodeID opcodeID = wasm_i32_div_s;
static constexpr size_t length = 4;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, dst, lhs, rhs);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, dst, lhs, rhs);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, dst, lhs, rhs))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, dst, lhs, rhs))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, dst, lhs, rhs);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& dst, VirtualRegister& lhs, VirtualRegister& rhs)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<WasmOpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(dst)
&& Fits<VirtualRegister, __size>::check(lhs)
&& Fits<VirtualRegister, __size>::check(rhs)
&& (__size == OpcodeSize::Wide16 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, dst, lhs, rhs)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide32));
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(dst));
gen->write(Fits<VirtualRegister, __size>::convert(lhs));
gen->write(Fits<VirtualRegister, __size>::convert(rhs));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**i32_div_s"[2 - __sizeShiftAmount]);
dumper->dumpOperand("dst", m_dst, true);
dumper->dumpOperand("lhs", m_lhs, false);
dumper->dumpOperand("rhs", m_rhs, false);
}
WasmI32DivS(const uint8_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_lhs(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[1]))
, m_rhs(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmI32DivS(const uint16_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_lhs(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[1]))
, m_rhs(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmI32DivS(const uint32_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_lhs(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[1]))
, m_rhs(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static WasmI32DivS decode(const uint8_t* stream)
{
        // The instruction is constructed from a pointer to the first operand (the opcode byte and any wide prefix are skipped).
if (*stream == wasm_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == wasm_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide16>(value, func);
else
setDst<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setLhs(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setLhs<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setLhs<OpcodeSize::Wide16>(value, func);
else
setLhs<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setLhs(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setRhs(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setRhs<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setRhs<OpcodeSize::Wide16>(value, func);
else
setRhs<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setRhs(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 2 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
VirtualRegister m_dst;
VirtualRegister m_lhs;
VirtualRegister m_rhs;
};
struct WasmI32DivU : public Instruction {
static constexpr WasmOpcodeID opcodeID = wasm_i32_div_u;
static constexpr size_t length = 4;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, dst, lhs, rhs);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, dst, lhs, rhs);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, dst, lhs, rhs))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, dst, lhs, rhs))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, dst, lhs, rhs);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& dst, VirtualRegister& lhs, VirtualRegister& rhs)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<WasmOpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(dst)
&& Fits<VirtualRegister, __size>::check(lhs)
&& Fits<VirtualRegister, __size>::check(rhs)
&& (__size == OpcodeSize::Wide16 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, dst, lhs, rhs)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide32));
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(dst));
gen->write(Fits<VirtualRegister, __size>::convert(lhs));
gen->write(Fits<VirtualRegister, __size>::convert(rhs));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**i32_div_u"[2 - __sizeShiftAmount]);
dumper->dumpOperand("dst", m_dst, true);
dumper->dumpOperand("lhs", m_lhs, false);
dumper->dumpOperand("rhs", m_rhs, false);
}
WasmI32DivU(const uint8_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_lhs(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[1]))
, m_rhs(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmI32DivU(const uint16_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_lhs(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[1]))
, m_rhs(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmI32DivU(const uint32_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_lhs(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[1]))
, m_rhs(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static WasmI32DivU decode(const uint8_t* stream)
{
        // The instruction is constructed from a pointer to the first operand (the opcode byte and any wide prefix are skipped).
if (*stream == wasm_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == wasm_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide16>(value, func);
else
setDst<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setLhs(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setLhs<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setLhs<OpcodeSize::Wide16>(value, func);
else
setLhs<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setLhs(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setRhs(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setRhs<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setRhs<OpcodeSize::Wide16>(value, func);
else
setRhs<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setRhs(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 2 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
VirtualRegister m_dst;
VirtualRegister m_lhs;
VirtualRegister m_rhs;
};
struct WasmI32RemS : public Instruction {
static constexpr WasmOpcodeID opcodeID = wasm_i32_rem_s;
static constexpr size_t length = 4;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, dst, lhs, rhs);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, dst, lhs, rhs);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, dst, lhs, rhs))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, dst, lhs, rhs))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, dst, lhs, rhs);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& dst, VirtualRegister& lhs, VirtualRegister& rhs)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<WasmOpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(dst)
&& Fits<VirtualRegister, __size>::check(lhs)
&& Fits<VirtualRegister, __size>::check(rhs)
&& (__size == OpcodeSize::Wide16 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, dst, lhs, rhs)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide32));
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(dst));
gen->write(Fits<VirtualRegister, __size>::convert(lhs));
gen->write(Fits<VirtualRegister, __size>::convert(rhs));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**i32_rem_s"[2 - __sizeShiftAmount]);
dumper->dumpOperand("dst", m_dst, true);
dumper->dumpOperand("lhs", m_lhs, false);
dumper->dumpOperand("rhs", m_rhs, false);
}
WasmI32RemS(const uint8_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_lhs(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[1]))
, m_rhs(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmI32RemS(const uint16_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_lhs(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[1]))
, m_rhs(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmI32RemS(const uint32_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_lhs(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[1]))
, m_rhs(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static WasmI32RemS decode(const uint8_t* stream)
{
        // The instruction is constructed from a pointer to the first operand (the opcode byte and any wide prefix are skipped).
if (*stream == wasm_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == wasm_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide16>(value, func);
else
setDst<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setLhs(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setLhs<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setLhs<OpcodeSize::Wide16>(value, func);
else
setLhs<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setLhs(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setRhs(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setRhs<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setRhs<OpcodeSize::Wide16>(value, func);
else
setRhs<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setRhs(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 2 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
VirtualRegister m_dst;
VirtualRegister m_lhs;
VirtualRegister m_rhs;
};
struct WasmI32RemU : public Instruction {
static constexpr WasmOpcodeID opcodeID = wasm_i32_rem_u;
static constexpr size_t length = 4;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, dst, lhs, rhs);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, dst, lhs, rhs);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, dst, lhs, rhs))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, dst, lhs, rhs))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, dst, lhs, rhs);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& dst, VirtualRegister& lhs, VirtualRegister& rhs)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<WasmOpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(dst)
&& Fits<VirtualRegister, __size>::check(lhs)
&& Fits<VirtualRegister, __size>::check(rhs)
&& (__size == OpcodeSize::Wide16 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, dst, lhs, rhs)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide32));
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(dst));
gen->write(Fits<VirtualRegister, __size>::convert(lhs));
gen->write(Fits<VirtualRegister, __size>::convert(rhs));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**i32_rem_u"[2 - __sizeShiftAmount]);
dumper->dumpOperand("dst", m_dst, true);
dumper->dumpOperand("lhs", m_lhs, false);
dumper->dumpOperand("rhs", m_rhs, false);
}
WasmI32RemU(const uint8_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_lhs(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[1]))
, m_rhs(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmI32RemU(const uint16_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_lhs(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[1]))
, m_rhs(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmI32RemU(const uint32_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_lhs(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[1]))
, m_rhs(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static WasmI32RemU decode(const uint8_t* stream)
{
        // The instruction is constructed from a pointer to the first operand (the opcode byte and any wide prefix are skipped).
if (*stream == wasm_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == wasm_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide16>(value, func);
else
setDst<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setLhs(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setLhs<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setLhs<OpcodeSize::Wide16>(value, func);
else
setLhs<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setLhs(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setRhs(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setRhs<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setRhs<OpcodeSize::Wide16>(value, func);
else
setRhs<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setRhs(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 2 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
VirtualRegister m_dst;
VirtualRegister m_lhs;
VirtualRegister m_rhs;
};
struct WasmI32And : public Instruction {
static constexpr WasmOpcodeID opcodeID = wasm_i32_and;
static constexpr size_t length = 4;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, dst, lhs, rhs);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, dst, lhs, rhs);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, dst, lhs, rhs))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, dst, lhs, rhs))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, dst, lhs, rhs);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& dst, VirtualRegister& lhs, VirtualRegister& rhs)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<WasmOpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(dst)
&& Fits<VirtualRegister, __size>::check(lhs)
&& Fits<VirtualRegister, __size>::check(rhs)
&& (__size == OpcodeSize::Wide16 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, dst, lhs, rhs)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide32));
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(dst));
gen->write(Fits<VirtualRegister, __size>::convert(lhs));
gen->write(Fits<VirtualRegister, __size>::convert(rhs));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**i32_and"[2 - __sizeShiftAmount]);
dumper->dumpOperand("dst", m_dst, true);
dumper->dumpOperand("lhs", m_lhs, false);
dumper->dumpOperand("rhs", m_rhs, false);
}
WasmI32And(const uint8_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_lhs(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[1]))
, m_rhs(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmI32And(const uint16_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_lhs(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[1]))
, m_rhs(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmI32And(const uint32_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_lhs(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[1]))
, m_rhs(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static WasmI32And decode(const uint8_t* stream)
{
        // The instruction is constructed from a pointer to the first operand (the opcode byte and any wide prefix are skipped).
if (*stream == wasm_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == wasm_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide16>(value, func);
else
setDst<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setLhs(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setLhs<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setLhs<OpcodeSize::Wide16>(value, func);
else
setLhs<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setLhs(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setRhs(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setRhs<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setRhs<OpcodeSize::Wide16>(value, func);
else
setRhs<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setRhs(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 2 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
VirtualRegister m_dst;
VirtualRegister m_lhs;
VirtualRegister m_rhs;
};
struct WasmI32Or : public Instruction {
static constexpr WasmOpcodeID opcodeID = wasm_i32_or;
static constexpr size_t length = 4;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, dst, lhs, rhs);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, dst, lhs, rhs);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, dst, lhs, rhs))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, dst, lhs, rhs))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, dst, lhs, rhs);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& dst, VirtualRegister& lhs, VirtualRegister& rhs)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<WasmOpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(dst)
&& Fits<VirtualRegister, __size>::check(lhs)
&& Fits<VirtualRegister, __size>::check(rhs)
&& (__size == OpcodeSize::Wide16 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, dst, lhs, rhs)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide32));
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(dst));
gen->write(Fits<VirtualRegister, __size>::convert(lhs));
gen->write(Fits<VirtualRegister, __size>::convert(rhs));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**i32_or"[2 - __sizeShiftAmount]);
dumper->dumpOperand("dst", m_dst, true);
dumper->dumpOperand("lhs", m_lhs, false);
dumper->dumpOperand("rhs", m_rhs, false);
}
WasmI32Or(const uint8_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_lhs(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[1]))
, m_rhs(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmI32Or(const uint16_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_lhs(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[1]))
, m_rhs(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmI32Or(const uint32_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_lhs(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[1]))
, m_rhs(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static WasmI32Or decode(const uint8_t* stream)
{
        // The instruction is constructed from a pointer to the first operand (the opcode byte and any wide prefix are skipped).
if (*stream == wasm_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == wasm_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide16>(value, func);
else
setDst<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setLhs(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setLhs<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setLhs<OpcodeSize::Wide16>(value, func);
else
setLhs<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setLhs(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setRhs(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setRhs<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setRhs<OpcodeSize::Wide16>(value, func);
else
setRhs<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setRhs(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 2 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
VirtualRegister m_dst;
VirtualRegister m_lhs;
VirtualRegister m_rhs;
};
struct WasmI32Xor : public Instruction {
static constexpr WasmOpcodeID opcodeID = wasm_i32_xor;
static constexpr size_t length = 4;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, dst, lhs, rhs);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, dst, lhs, rhs);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, dst, lhs, rhs))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, dst, lhs, rhs))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, dst, lhs, rhs);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& dst, VirtualRegister& lhs, VirtualRegister& rhs)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<WasmOpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(dst)
&& Fits<VirtualRegister, __size>::check(lhs)
&& Fits<VirtualRegister, __size>::check(rhs)
&& (__size == OpcodeSize::Wide16 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, dst, lhs, rhs)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide32));
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(dst));
gen->write(Fits<VirtualRegister, __size>::convert(lhs));
gen->write(Fits<VirtualRegister, __size>::convert(rhs));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**i32_xor"[2 - __sizeShiftAmount]);
dumper->dumpOperand("dst", m_dst, true);
dumper->dumpOperand("lhs", m_lhs, false);
dumper->dumpOperand("rhs", m_rhs, false);
}
WasmI32Xor(const uint8_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_lhs(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[1]))
, m_rhs(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmI32Xor(const uint16_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_lhs(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[1]))
, m_rhs(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmI32Xor(const uint32_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_lhs(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[1]))
, m_rhs(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static WasmI32Xor decode(const uint8_t* stream)
{
        // `stream` points at the opcode byte (or at the wide-size prefix, if one was emitted); the constructors receive a pointer to the first operand.
if (*stream == wasm_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == wasm_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
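    // Encoding note (an explanatory sketch that applies to every Wasm* struct in this
    // file, inferred from emitImpl/decode above): in the narrow form each operand
    // occupies a single byte. When an operand does not fit (see checkImpl), emitImpl
    // writes a one-byte wasm_wide16 or wasm_wide32 prefix before the opcode and widens
    // every operand slot to 2 or 4 bytes; decode() inspects that leading byte to pick
    // the matching constructor.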
template<typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide16>(value, func);
else
setDst<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setLhs(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setLhs<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setLhs<OpcodeSize::Wide16>(value, func);
else
setLhs<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setLhs(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setRhs(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setRhs<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setRhs<OpcodeSize::Wide16>(value, func);
else
setRhs<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setRhs(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 2 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
VirtualRegister m_dst;
VirtualRegister m_lhs;
VirtualRegister m_rhs;
};
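// WasmI32Shl encodes the register operands of wasm_i32_shl. Per the Wasm spec the
// shift count is taken modulo 32. A semantic sketch only (the operation itself is
// executed by the interpreter/JIT tiers, not defined in this header):
//   dst = lhs << (rhs & 31);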
struct WasmI32Shl : public Instruction {
static constexpr WasmOpcodeID opcodeID = wasm_i32_shl;
static constexpr size_t length = 4;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, dst, lhs, rhs);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, dst, lhs, rhs);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, dst, lhs, rhs))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, dst, lhs, rhs))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, dst, lhs, rhs);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& dst, VirtualRegister& lhs, VirtualRegister& rhs)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<WasmOpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(dst)
&& Fits<VirtualRegister, __size>::check(lhs)
&& Fits<VirtualRegister, __size>::check(rhs)
&& (__size == OpcodeSize::Wide16 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, dst, lhs, rhs)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide32));
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(dst));
gen->write(Fits<VirtualRegister, __size>::convert(lhs));
gen->write(Fits<VirtualRegister, __size>::convert(rhs));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**i32_shl"[2 - __sizeShiftAmount]);
dumper->dumpOperand("dst", m_dst, true);
dumper->dumpOperand("lhs", m_lhs, false);
dumper->dumpOperand("rhs", m_rhs, false);
}
WasmI32Shl(const uint8_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_lhs(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[1]))
, m_rhs(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmI32Shl(const uint16_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_lhs(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[1]))
, m_rhs(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmI32Shl(const uint32_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_lhs(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[1]))
, m_rhs(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static WasmI32Shl decode(const uint8_t* stream)
{
        // `stream` points at the opcode byte (or at the wide-size prefix, if one was emitted); the constructors receive a pointer to the first operand.
if (*stream == wasm_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == wasm_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide16>(value, func);
else
setDst<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setLhs(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setLhs<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setLhs<OpcodeSize::Wide16>(value, func);
else
setLhs<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setLhs(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setRhs(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setRhs<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setRhs<OpcodeSize::Wide16>(value, func);
else
setRhs<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setRhs(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 2 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
VirtualRegister m_dst;
VirtualRegister m_lhs;
VirtualRegister m_rhs;
};
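// WasmI32ShrU encodes the operands of wasm_i32_shr_u, the unsigned (logical,
// zero-filling) right shift, with the count again taken modulo 32. Semantic
// sketch only:
//   dst = uint32_t(lhs) >> (rhs & 31);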
struct WasmI32ShrU : public Instruction {
static constexpr WasmOpcodeID opcodeID = wasm_i32_shr_u;
static constexpr size_t length = 4;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, dst, lhs, rhs);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, dst, lhs, rhs);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, dst, lhs, rhs))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, dst, lhs, rhs))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, dst, lhs, rhs);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& dst, VirtualRegister& lhs, VirtualRegister& rhs)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<WasmOpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(dst)
&& Fits<VirtualRegister, __size>::check(lhs)
&& Fits<VirtualRegister, __size>::check(rhs)
&& (__size == OpcodeSize::Wide16 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, dst, lhs, rhs)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide32));
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(dst));
gen->write(Fits<VirtualRegister, __size>::convert(lhs));
gen->write(Fits<VirtualRegister, __size>::convert(rhs));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**i32_shr_u"[2 - __sizeShiftAmount]);
dumper->dumpOperand("dst", m_dst, true);
dumper->dumpOperand("lhs", m_lhs, false);
dumper->dumpOperand("rhs", m_rhs, false);
}
WasmI32ShrU(const uint8_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_lhs(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[1]))
, m_rhs(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmI32ShrU(const uint16_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_lhs(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[1]))
, m_rhs(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmI32ShrU(const uint32_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_lhs(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[1]))
, m_rhs(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static WasmI32ShrU decode(const uint8_t* stream)
{
        // `stream` points at the opcode byte (or at the wide-size prefix, if one was emitted); the constructors receive a pointer to the first operand.
if (*stream == wasm_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == wasm_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide16>(value, func);
else
setDst<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setLhs(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setLhs<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setLhs<OpcodeSize::Wide16>(value, func);
else
setLhs<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setLhs(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setRhs(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setRhs<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setRhs<OpcodeSize::Wide16>(value, func);
else
setRhs<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setRhs(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 2 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
VirtualRegister m_dst;
VirtualRegister m_lhs;
VirtualRegister m_rhs;
};
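// WasmI32ShrS encodes the operands of wasm_i32_shr_s, the signed (arithmetic,
// sign-extending) right shift, count taken modulo 32. Semantic sketch only:
//   dst = int32_t(lhs) >> (rhs & 31);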
struct WasmI32ShrS : public Instruction {
static constexpr WasmOpcodeID opcodeID = wasm_i32_shr_s;
static constexpr size_t length = 4;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, dst, lhs, rhs);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, dst, lhs, rhs);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, dst, lhs, rhs))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, dst, lhs, rhs))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, dst, lhs, rhs);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& dst, VirtualRegister& lhs, VirtualRegister& rhs)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<WasmOpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(dst)
&& Fits<VirtualRegister, __size>::check(lhs)
&& Fits<VirtualRegister, __size>::check(rhs)
&& (__size == OpcodeSize::Wide16 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, dst, lhs, rhs)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide32));
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(dst));
gen->write(Fits<VirtualRegister, __size>::convert(lhs));
gen->write(Fits<VirtualRegister, __size>::convert(rhs));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**i32_shr_s"[2 - __sizeShiftAmount]);
dumper->dumpOperand("dst", m_dst, true);
dumper->dumpOperand("lhs", m_lhs, false);
dumper->dumpOperand("rhs", m_rhs, false);
}
WasmI32ShrS(const uint8_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_lhs(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[1]))
, m_rhs(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmI32ShrS(const uint16_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_lhs(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[1]))
, m_rhs(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmI32ShrS(const uint32_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_lhs(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[1]))
, m_rhs(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static WasmI32ShrS decode(const uint8_t* stream)
{
        // `stream` points at the opcode byte (or at the wide-size prefix, if one was emitted); the constructors receive a pointer to the first operand.
if (*stream == wasm_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == wasm_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide16>(value, func);
else
setDst<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setLhs(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setLhs<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setLhs<OpcodeSize::Wide16>(value, func);
else
setLhs<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setLhs(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setRhs(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setRhs<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setRhs<OpcodeSize::Wide16>(value, func);
else
setRhs<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setRhs(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 2 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
VirtualRegister m_dst;
VirtualRegister m_lhs;
VirtualRegister m_rhs;
};
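// WasmI32Rotr encodes the operands of wasm_i32_rotr, a 32-bit rotate right by
// rhs modulo 32. Semantic sketch only:
//   uint32_t k = rhs & 31;
//   dst = (uint32_t(lhs) >> k) | (uint32_t(lhs) << ((32 - k) & 31));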
struct WasmI32Rotr : public Instruction {
static constexpr WasmOpcodeID opcodeID = wasm_i32_rotr;
static constexpr size_t length = 4;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, dst, lhs, rhs);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, dst, lhs, rhs);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, dst, lhs, rhs))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, dst, lhs, rhs))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, dst, lhs, rhs);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& dst, VirtualRegister& lhs, VirtualRegister& rhs)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<WasmOpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(dst)
&& Fits<VirtualRegister, __size>::check(lhs)
&& Fits<VirtualRegister, __size>::check(rhs)
&& (__size == OpcodeSize::Wide16 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, dst, lhs, rhs)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide32));
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(dst));
gen->write(Fits<VirtualRegister, __size>::convert(lhs));
gen->write(Fits<VirtualRegister, __size>::convert(rhs));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**i32_rotr"[2 - __sizeShiftAmount]);
dumper->dumpOperand("dst", m_dst, true);
dumper->dumpOperand("lhs", m_lhs, false);
dumper->dumpOperand("rhs", m_rhs, false);
}
WasmI32Rotr(const uint8_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_lhs(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[1]))
, m_rhs(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmI32Rotr(const uint16_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_lhs(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[1]))
, m_rhs(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmI32Rotr(const uint32_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_lhs(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[1]))
, m_rhs(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static WasmI32Rotr decode(const uint8_t* stream)
{
        // `stream` points at the opcode byte (or at the wide-size prefix, if one was emitted); the constructors receive a pointer to the first operand.
if (*stream == wasm_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == wasm_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide16>(value, func);
else
setDst<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setLhs(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setLhs<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setLhs<OpcodeSize::Wide16>(value, func);
else
setLhs<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setLhs(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setRhs(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setRhs<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setRhs<OpcodeSize::Wide16>(value, func);
else
setRhs<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setRhs(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 2 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
VirtualRegister m_dst;
VirtualRegister m_lhs;
VirtualRegister m_rhs;
};
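// WasmI32Rotl encodes the operands of wasm_i32_rotl, a 32-bit rotate left by
// rhs modulo 32. Semantic sketch only:
//   uint32_t k = rhs & 31;
//   dst = (uint32_t(lhs) << k) | (uint32_t(lhs) >> ((32 - k) & 31));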
struct WasmI32Rotl : public Instruction {
static constexpr WasmOpcodeID opcodeID = wasm_i32_rotl;
static constexpr size_t length = 4;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, dst, lhs, rhs);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, dst, lhs, rhs);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, dst, lhs, rhs))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, dst, lhs, rhs))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, dst, lhs, rhs);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& dst, VirtualRegister& lhs, VirtualRegister& rhs)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<WasmOpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(dst)
&& Fits<VirtualRegister, __size>::check(lhs)
&& Fits<VirtualRegister, __size>::check(rhs)
&& (__size == OpcodeSize::Wide16 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, dst, lhs, rhs)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide32));
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(dst));
gen->write(Fits<VirtualRegister, __size>::convert(lhs));
gen->write(Fits<VirtualRegister, __size>::convert(rhs));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**i32_rotl"[2 - __sizeShiftAmount]);
dumper->dumpOperand("dst", m_dst, true);
dumper->dumpOperand("lhs", m_lhs, false);
dumper->dumpOperand("rhs", m_rhs, false);
}
WasmI32Rotl(const uint8_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_lhs(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[1]))
, m_rhs(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmI32Rotl(const uint16_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_lhs(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[1]))
, m_rhs(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmI32Rotl(const uint32_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_lhs(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[1]))
, m_rhs(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static WasmI32Rotl decode(const uint8_t* stream)
{
        // `stream` points at the opcode byte (or at the wide-size prefix, if one was emitted); the constructors receive a pointer to the first operand.
if (*stream == wasm_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == wasm_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide16>(value, func);
else
setDst<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setLhs(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setLhs<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setLhs<OpcodeSize::Wide16>(value, func);
else
setLhs<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setLhs(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setRhs(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setRhs<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setRhs<OpcodeSize::Wide16>(value, func);
else
setRhs<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setRhs(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 2 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
VirtualRegister m_dst;
VirtualRegister m_lhs;
VirtualRegister m_rhs;
};
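// WasmI32Eq encodes the operands of wasm_i32_eq. Like all Wasm comparisons it
// produces an i32 of 0 or 1. Semantic sketch only:
//   dst = (lhs == rhs) ? 1 : 0;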
struct WasmI32Eq : public Instruction {
static constexpr WasmOpcodeID opcodeID = wasm_i32_eq;
static constexpr size_t length = 4;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, dst, lhs, rhs);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, dst, lhs, rhs);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, dst, lhs, rhs))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, dst, lhs, rhs))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, dst, lhs, rhs);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& dst, VirtualRegister& lhs, VirtualRegister& rhs)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<WasmOpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(dst)
&& Fits<VirtualRegister, __size>::check(lhs)
&& Fits<VirtualRegister, __size>::check(rhs)
&& (__size == OpcodeSize::Wide16 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, dst, lhs, rhs)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide32));
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(dst));
gen->write(Fits<VirtualRegister, __size>::convert(lhs));
gen->write(Fits<VirtualRegister, __size>::convert(rhs));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**i32_eq"[2 - __sizeShiftAmount]);
dumper->dumpOperand("dst", m_dst, true);
dumper->dumpOperand("lhs", m_lhs, false);
dumper->dumpOperand("rhs", m_rhs, false);
}
WasmI32Eq(const uint8_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_lhs(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[1]))
, m_rhs(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmI32Eq(const uint16_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_lhs(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[1]))
, m_rhs(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmI32Eq(const uint32_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_lhs(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[1]))
, m_rhs(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static WasmI32Eq decode(const uint8_t* stream)
{
        // `stream` points at the opcode byte (or at the wide-size prefix, if one was emitted); the constructors receive a pointer to the first operand.
if (*stream == wasm_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == wasm_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide16>(value, func);
else
setDst<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setLhs(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setLhs<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setLhs<OpcodeSize::Wide16>(value, func);
else
setLhs<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setLhs(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setRhs(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setRhs<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setRhs<OpcodeSize::Wide16>(value, func);
else
setRhs<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setRhs(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 2 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
VirtualRegister m_dst;
VirtualRegister m_lhs;
VirtualRegister m_rhs;
};
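// WasmI32Ne encodes the operands of wasm_i32_ne, the inequality comparison,
// again producing an i32 of 0 or 1. Semantic sketch only:
//   dst = (lhs != rhs) ? 1 : 0;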
struct WasmI32Ne : public Instruction {
static constexpr WasmOpcodeID opcodeID = wasm_i32_ne;
static constexpr size_t length = 4;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, dst, lhs, rhs);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, dst, lhs, rhs);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, dst, lhs, rhs))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, dst, lhs, rhs))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, dst, lhs, rhs);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& dst, VirtualRegister& lhs, VirtualRegister& rhs)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<WasmOpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(dst)
&& Fits<VirtualRegister, __size>::check(lhs)
&& Fits<VirtualRegister, __size>::check(rhs)
&& (__size == OpcodeSize::Wide16 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, dst, lhs, rhs)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide32));
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(dst));
gen->write(Fits<VirtualRegister, __size>::convert(lhs));
gen->write(Fits<VirtualRegister, __size>::convert(rhs));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**i32_ne"[2 - __sizeShiftAmount]);
dumper->dumpOperand("dst", m_dst, true);
dumper->dumpOperand("lhs", m_lhs, false);
dumper->dumpOperand("rhs", m_rhs, false);
}
WasmI32Ne(const uint8_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_lhs(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[1]))
, m_rhs(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmI32Ne(const uint16_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_lhs(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[1]))
, m_rhs(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmI32Ne(const uint32_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_lhs(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[1]))
, m_rhs(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static WasmI32Ne decode(const uint8_t* stream)
{
        // `stream` points at the opcode byte (or at the wide-size prefix, if one was emitted); the constructors receive a pointer to the first operand.
if (*stream == wasm_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == wasm_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide16>(value, func);
else
setDst<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setLhs(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setLhs<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setLhs<OpcodeSize::Wide16>(value, func);
else
setLhs<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setLhs(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setRhs(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setRhs<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setRhs<OpcodeSize::Wide16>(value, func);
else
setRhs<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setRhs(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 2 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
VirtualRegister m_dst;
VirtualRegister m_lhs;
VirtualRegister m_rhs;
};
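// WasmI32LtS encodes the operands of wasm_i32_lt_s, the signed less-than
// comparison. Semantic sketch only:
//   dst = (int32_t(lhs) < int32_t(rhs)) ? 1 : 0;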
struct WasmI32LtS : public Instruction {
static constexpr WasmOpcodeID opcodeID = wasm_i32_lt_s;
static constexpr size_t length = 4;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, dst, lhs, rhs);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, dst, lhs, rhs);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, dst, lhs, rhs))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, dst, lhs, rhs))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, dst, lhs, rhs);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& dst, VirtualRegister& lhs, VirtualRegister& rhs)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<WasmOpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(dst)
&& Fits<VirtualRegister, __size>::check(lhs)
&& Fits<VirtualRegister, __size>::check(rhs)
&& (__size == OpcodeSize::Wide16 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, dst, lhs, rhs)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide32));
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(dst));
gen->write(Fits<VirtualRegister, __size>::convert(lhs));
gen->write(Fits<VirtualRegister, __size>::convert(rhs));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**i32_lt_s"[2 - __sizeShiftAmount]);
dumper->dumpOperand("dst", m_dst, true);
dumper->dumpOperand("lhs", m_lhs, false);
dumper->dumpOperand("rhs", m_rhs, false);
}
WasmI32LtS(const uint8_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_lhs(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[1]))
, m_rhs(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmI32LtS(const uint16_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_lhs(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[1]))
, m_rhs(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmI32LtS(const uint32_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_lhs(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[1]))
, m_rhs(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static WasmI32LtS decode(const uint8_t* stream)
{
        // `stream` points at the opcode byte (or at the wide-size prefix, if one was emitted); the constructors receive a pointer to the first operand.
if (*stream == wasm_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == wasm_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide16>(value, func);
else
setDst<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setLhs(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setLhs<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setLhs<OpcodeSize::Wide16>(value, func);
else
setLhs<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setLhs(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setRhs(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setRhs<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setRhs<OpcodeSize::Wide16>(value, func);
else
setRhs<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setRhs(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 2 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
VirtualRegister m_dst;
VirtualRegister m_lhs;
VirtualRegister m_rhs;
};
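// The i32 comparison opcodes that follow (le_s, lt_u, le_u, gt_s, ge_s, gt_u,
// ge_u) all share this shape: an optional wide-size prefix byte, one opcode
// byte, then dst/lhs/rhs operands of 1, 2, or 4 bytes each.
//
// Minimal usage sketch (illustrative only; generator, dstReg, lhsReg, rhsReg,
// and pc are hypothetical names, not declared by this header):
//
//     WasmI32LeS::emit(&generator, dstReg, lhsReg, rhsReg); // smallest width that fits
//     ...
//     auto op = WasmI32LeS::decode(pc); // pc points at the instruction's first byte
//     // op.m_dst, op.m_lhs, op.m_rhs now hold the decoded registers.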
struct WasmI32LeS : public Instruction {
static constexpr WasmOpcodeID opcodeID = wasm_i32_le_s;
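    // One slot for the opcode plus one per operand (dst, lhs, rhs); each slot's byte size follows the chosen encoding width.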
static constexpr size_t length = 4;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, dst, lhs, rhs);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, dst, lhs, rhs);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
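        // Probe encodings narrowest-first; the first width whose operands all fit is emitted. The final wide32 attempt asserts, since by then everything must fit.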
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, dst, lhs, rhs))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, dst, lhs, rhs))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, dst, lhs, rhs);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& dst, VirtualRegister& lhs, VirtualRegister& rhs)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
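        // An encoding is usable only if the opcode and every operand fit the candidate width; wide forms additionally require the prefix byte itself to fit a narrow slot.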
return Fits<WasmOpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(dst)
&& Fits<VirtualRegister, __size>::check(lhs)
&& Fits<VirtualRegister, __size>::check(rhs)
&& (__size == OpcodeSize::Wide16 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
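        // Wide instructions are aligned first, then written as [optional prefix][opcode][dst][lhs][rhs]; returning false lets the caller retry at a wider size.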
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, dst, lhs, rhs)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide32));
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(dst));
gen->write(Fits<VirtualRegister, __size>::convert(lhs));
gen->write(Fits<VirtualRegister, __size>::convert(rhs));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**i32_le_s"[2 - __sizeShiftAmount]);
dumper->dumpOperand("dst", m_dst, true);
dumper->dumpOperand("lhs", m_lhs, false);
dumper->dumpOperand("rhs", m_rhs, false);
}
WasmI32LeS(const uint8_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_lhs(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[1]))
, m_rhs(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmI32LeS(const uint16_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_lhs(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[1]))
, m_rhs(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmI32LeS(const uint32_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_lhs(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[1]))
, m_rhs(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static WasmI32LeS decode(const uint8_t* stream)
{
        // The constructor is handed a pointer to the first operand; the opcode byte and any wide-size prefix are skipped here.
if (*stream == wasm_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == wasm_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide16>(value, func);
else
setDst<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setLhs(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setLhs<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setLhs<OpcodeSize::Wide16>(value, func);
else
setLhs<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setLhs(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setRhs(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setRhs<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setRhs<OpcodeSize::Wide16>(value, func);
else
setRhs<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setRhs(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 2 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
VirtualRegister m_dst;
VirtualRegister m_lhs;
VirtualRegister m_rhs;
};
struct WasmI32LtU : public Instruction {
static constexpr WasmOpcodeID opcodeID = wasm_i32_lt_u;
static constexpr size_t length = 4;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, dst, lhs, rhs);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, dst, lhs, rhs);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, dst, lhs, rhs))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, dst, lhs, rhs))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, dst, lhs, rhs);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& dst, VirtualRegister& lhs, VirtualRegister& rhs)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<WasmOpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(dst)
&& Fits<VirtualRegister, __size>::check(lhs)
&& Fits<VirtualRegister, __size>::check(rhs)
&& (__size == OpcodeSize::Wide16 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, dst, lhs, rhs)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide32));
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(dst));
gen->write(Fits<VirtualRegister, __size>::convert(lhs));
gen->write(Fits<VirtualRegister, __size>::convert(rhs));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**i32_lt_u"[2 - __sizeShiftAmount]);
dumper->dumpOperand("dst", m_dst, true);
dumper->dumpOperand("lhs", m_lhs, false);
dumper->dumpOperand("rhs", m_rhs, false);
}
WasmI32LtU(const uint8_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_lhs(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[1]))
, m_rhs(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[2]))
{
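        // The byte immediately preceding the operands must be this opcode; this catches decoding from a mispositioned stream.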
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmI32LtU(const uint16_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_lhs(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[1]))
, m_rhs(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmI32LtU(const uint32_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_lhs(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[1]))
, m_rhs(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static WasmI32LtU decode(const uint8_t* stream)
{
        // The constructor is handed a pointer to the first operand; the opcode byte and any wide-size prefix are skipped here.
if (*stream == wasm_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == wasm_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide16>(value, func);
else
setDst<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setLhs(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setLhs<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setLhs<OpcodeSize::Wide16>(value, func);
else
setLhs<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setLhs(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setRhs(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setRhs<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setRhs<OpcodeSize::Wide16>(value, func);
else
setRhs<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setRhs(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 2 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
VirtualRegister m_dst;
VirtualRegister m_lhs;
VirtualRegister m_rhs;
};
struct WasmI32LeU : public Instruction {
static constexpr WasmOpcodeID opcodeID = wasm_i32_le_u;
static constexpr size_t length = 4;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, dst, lhs, rhs);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, dst, lhs, rhs);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, dst, lhs, rhs))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, dst, lhs, rhs))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, dst, lhs, rhs);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& dst, VirtualRegister& lhs, VirtualRegister& rhs)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<WasmOpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(dst)
&& Fits<VirtualRegister, __size>::check(lhs)
&& Fits<VirtualRegister, __size>::check(rhs)
&& (__size == OpcodeSize::Wide16 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, dst, lhs, rhs)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide32));
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(dst));
gen->write(Fits<VirtualRegister, __size>::convert(lhs));
gen->write(Fits<VirtualRegister, __size>::convert(rhs));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**i32_le_u"[2 - __sizeShiftAmount]);
dumper->dumpOperand("dst", m_dst, true);
dumper->dumpOperand("lhs", m_lhs, false);
dumper->dumpOperand("rhs", m_rhs, false);
}
WasmI32LeU(const uint8_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_lhs(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[1]))
, m_rhs(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmI32LeU(const uint16_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_lhs(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[1]))
, m_rhs(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmI32LeU(const uint32_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_lhs(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[1]))
, m_rhs(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static WasmI32LeU decode(const uint8_t* stream)
{
        // The constructor is handed a pointer to the first operand; the opcode byte and any wide-size prefix are skipped here.
if (*stream == wasm_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == wasm_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide16>(value, func);
else
setDst<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setLhs(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setLhs<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setLhs<OpcodeSize::Wide16>(value, func);
else
setLhs<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setLhs(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setRhs(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setRhs<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setRhs<OpcodeSize::Wide16>(value, func);
else
setRhs<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setRhs(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 2 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
VirtualRegister m_dst;
VirtualRegister m_lhs;
VirtualRegister m_rhs;
};
struct WasmI32GtS : public Instruction {
static constexpr WasmOpcodeID opcodeID = wasm_i32_gt_s;
static constexpr size_t length = 4;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, dst, lhs, rhs);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
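        // NoAssert lets emitWithSmallestSizeRequirement probe a width and fall through on failure; Assert is for callers that require this exact width to succeed.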
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, dst, lhs, rhs);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, dst, lhs, rhs))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, dst, lhs, rhs))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, dst, lhs, rhs);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& dst, VirtualRegister& lhs, VirtualRegister& rhs)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<WasmOpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(dst)
&& Fits<VirtualRegister, __size>::check(lhs)
&& Fits<VirtualRegister, __size>::check(rhs)
&& (__size == OpcodeSize::Wide16 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, dst, lhs, rhs)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide32));
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(dst));
gen->write(Fits<VirtualRegister, __size>::convert(lhs));
gen->write(Fits<VirtualRegister, __size>::convert(rhs));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**i32_gt_s"[2 - __sizeShiftAmount]);
dumper->dumpOperand("dst", m_dst, true);
dumper->dumpOperand("lhs", m_lhs, false);
dumper->dumpOperand("rhs", m_rhs, false);
}
WasmI32GtS(const uint8_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_lhs(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[1]))
, m_rhs(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmI32GtS(const uint16_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_lhs(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[1]))
, m_rhs(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmI32GtS(const uint32_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_lhs(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[1]))
, m_rhs(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static WasmI32GtS decode(const uint8_t* stream)
{
        // The constructor is handed a pointer to the first operand; the opcode byte and any wide-size prefix are skipped here.
if (*stream == wasm_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == wasm_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide16>(value, func);
else
setDst<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setLhs(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setLhs<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setLhs<OpcodeSize::Wide16>(value, func);
else
setLhs<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setLhs(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setRhs(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setRhs<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setRhs<OpcodeSize::Wide16>(value, func);
else
setRhs<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setRhs(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 2 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
VirtualRegister m_dst;
VirtualRegister m_lhs;
VirtualRegister m_rhs;
};
struct WasmI32GeS : public Instruction {
static constexpr WasmOpcodeID opcodeID = wasm_i32_ge_s;
static constexpr size_t length = 4;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, dst, lhs, rhs);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, dst, lhs, rhs);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, dst, lhs, rhs))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, dst, lhs, rhs))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, dst, lhs, rhs);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& dst, VirtualRegister& lhs, VirtualRegister& rhs)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<WasmOpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(dst)
&& Fits<VirtualRegister, __size>::check(lhs)
&& Fits<VirtualRegister, __size>::check(rhs)
&& (__size == OpcodeSize::Wide16 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, dst, lhs, rhs)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide32));
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(dst));
gen->write(Fits<VirtualRegister, __size>::convert(lhs));
gen->write(Fits<VirtualRegister, __size>::convert(rhs));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**i32_ge_s"[2 - __sizeShiftAmount]);
dumper->dumpOperand("dst", m_dst, true);
dumper->dumpOperand("lhs", m_lhs, false);
dumper->dumpOperand("rhs", m_rhs, false);
}
WasmI32GeS(const uint8_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_lhs(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[1]))
, m_rhs(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmI32GeS(const uint16_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_lhs(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[1]))
, m_rhs(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmI32GeS(const uint32_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_lhs(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[1]))
, m_rhs(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static WasmI32GeS decode(const uint8_t* stream)
{
        // The constructor is handed a pointer to the first operand; the opcode byte and any wide-size prefix are skipped here.
if (*stream == wasm_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == wasm_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide16>(value, func);
else
setDst<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setLhs(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setLhs<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setLhs<OpcodeSize::Wide16>(value, func);
else
setLhs<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setLhs(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setRhs(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setRhs<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setRhs<OpcodeSize::Wide16>(value, func);
else
setRhs<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setRhs(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 2 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
VirtualRegister m_dst;
VirtualRegister m_lhs;
VirtualRegister m_rhs;
};
struct WasmI32GtU : public Instruction {
static constexpr WasmOpcodeID opcodeID = wasm_i32_gt_u;
static constexpr size_t length = 4;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, dst, lhs, rhs);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, dst, lhs, rhs);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, dst, lhs, rhs))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, dst, lhs, rhs))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, dst, lhs, rhs);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& dst, VirtualRegister& lhs, VirtualRegister& rhs)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<WasmOpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(dst)
&& Fits<VirtualRegister, __size>::check(lhs)
&& Fits<VirtualRegister, __size>::check(rhs)
&& (__size == OpcodeSize::Wide16 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, dst, lhs, rhs)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide32));
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(dst));
gen->write(Fits<VirtualRegister, __size>::convert(lhs));
gen->write(Fits<VirtualRegister, __size>::convert(rhs));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**i32_gt_u"[2 - __sizeShiftAmount]);
dumper->dumpOperand("dst", m_dst, true);
dumper->dumpOperand("lhs", m_lhs, false);
dumper->dumpOperand("rhs", m_rhs, false);
}
WasmI32GtU(const uint8_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_lhs(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[1]))
, m_rhs(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmI32GtU(const uint16_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_lhs(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[1]))
, m_rhs(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmI32GtU(const uint32_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_lhs(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[1]))
, m_rhs(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static WasmI32GtU decode(const uint8_t* stream)
{
        // The constructor is handed a pointer to the first operand; the opcode byte and any wide-size prefix are skipped here.
if (*stream == wasm_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == wasm_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide16>(value, func);
else
setDst<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setLhs(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setLhs<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setLhs<OpcodeSize::Wide16>(value, func);
else
setLhs<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setLhs(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setRhs(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setRhs<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setRhs<OpcodeSize::Wide16>(value, func);
else
setRhs<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setRhs(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 2 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
VirtualRegister m_dst;
VirtualRegister m_lhs;
VirtualRegister m_rhs;
};
struct WasmI32GeU : public Instruction {
static constexpr WasmOpcodeID opcodeID = wasm_i32_ge_u;
static constexpr size_t length = 4;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, dst, lhs, rhs);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, dst, lhs, rhs);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, dst, lhs, rhs))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, dst, lhs, rhs))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, dst, lhs, rhs);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& dst, VirtualRegister& lhs, VirtualRegister& rhs)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<WasmOpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(dst)
&& Fits<VirtualRegister, __size>::check(lhs)
&& Fits<VirtualRegister, __size>::check(rhs)
&& (__size == OpcodeSize::Wide16 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, dst, lhs, rhs)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide32));
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(dst));
gen->write(Fits<VirtualRegister, __size>::convert(lhs));
gen->write(Fits<VirtualRegister, __size>::convert(rhs));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**i32_ge_u"[2 - __sizeShiftAmount]);
dumper->dumpOperand("dst", m_dst, true);
dumper->dumpOperand("lhs", m_lhs, false);
dumper->dumpOperand("rhs", m_rhs, false);
}
WasmI32GeU(const uint8_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_lhs(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[1]))
, m_rhs(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmI32GeU(const uint16_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_lhs(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[1]))
, m_rhs(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmI32GeU(const uint32_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_lhs(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[1]))
, m_rhs(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static WasmI32GeU decode(const uint8_t* stream)
{
        // The constructor is handed a pointer to the first operand; the opcode byte and any wide-size prefix are skipped here.
if (*stream == wasm_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == wasm_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide16>(value, func);
else
setDst<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setLhs(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setLhs<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setLhs<OpcodeSize::Wide16>(value, func);
else
setLhs<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setLhs(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setRhs(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setRhs<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setRhs<OpcodeSize::Wide16>(value, func);
else
setRhs<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setRhs(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 2 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
VirtualRegister m_dst;
VirtualRegister m_lhs;
VirtualRegister m_rhs;
};
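// The bit-counting opcodes below (clz, ctz, popcnt) reuse the same narrow/wide
// scheme but are unary: a dst and a single source operand, so length drops from 4 to 3.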
struct WasmI32Clz : public Instruction {
static constexpr WasmOpcodeID opcodeID = wasm_i32_clz;
static constexpr size_t length = 3;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister operand)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, dst, operand);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister operand)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, dst, operand);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister operand)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, dst, operand))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, dst, operand))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, dst, operand);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& dst, VirtualRegister& operand)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<WasmOpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(dst)
&& Fits<VirtualRegister, __size>::check(operand)
&& (__size == OpcodeSize::Wide16 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister operand)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, dst, operand)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide32));
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(dst));
gen->write(Fits<VirtualRegister, __size>::convert(operand));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**i32_clz"[2 - __sizeShiftAmount]);
dumper->dumpOperand("dst", m_dst, true);
dumper->dumpOperand("operand", m_operand, false);
}
WasmI32Clz(const uint8_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_operand(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[1]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmI32Clz(const uint16_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_operand(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[1]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmI32Clz(const uint32_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_operand(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[1]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static WasmI32Clz decode(const uint8_t* stream)
{
        // The constructor is handed a pointer to the first operand; the opcode byte and any wide-size prefix are skipped here.
if (*stream == wasm_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == wasm_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide16>(value, func);
else
setDst<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setOperand(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setOperand<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setOperand<OpcodeSize::Wide16>(value, func);
else
setOperand<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setOperand(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
VirtualRegister m_dst;
VirtualRegister m_operand;
};
struct WasmI32Ctz : public Instruction {
static constexpr WasmOpcodeID opcodeID = wasm_i32_ctz;
static constexpr size_t length = 3;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister operand)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, dst, operand);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister operand)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, dst, operand);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister operand)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, dst, operand))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, dst, operand))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, dst, operand);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& dst, VirtualRegister& operand)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<WasmOpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(dst)
&& Fits<VirtualRegister, __size>::check(operand)
&& (__size == OpcodeSize::Wide16 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister operand)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, dst, operand)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide32));
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(dst));
gen->write(Fits<VirtualRegister, __size>::convert(operand));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**i32_ctz"[2 - __sizeShiftAmount]);
dumper->dumpOperand("dst", m_dst, true);
dumper->dumpOperand("operand", m_operand, false);
}
WasmI32Ctz(const uint8_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_operand(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[1]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmI32Ctz(const uint16_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_operand(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[1]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmI32Ctz(const uint32_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_operand(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[1]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static WasmI32Ctz decode(const uint8_t* stream)
{
        // The constructor is handed a pointer to the first operand; the opcode byte and any wide-size prefix are skipped here.
if (*stream == wasm_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == wasm_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
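// In the setters below, the Functor is a fallback: it runs only when `value`
// does not fit this instruction's current operand width, and its return value
// is written instead. The caller supplies the recovery strategy.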
template<typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide16>(value, func);
else
setDst<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setOperand(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setOperand<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setOperand<OpcodeSize::Wide16>(value, func);
else
setOperand<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setOperand(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
VirtualRegister m_dst;
VirtualRegister m_operand;
};
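// wasm_i32_popcnt: population count; stores the number of set bits of the
// 32-bit value in `operand` into `dst` (Wasm i32.popcnt). A minimal usage
// sketch, assuming `gen` is any BytecodeGenerator-like object providing the
// write()/recordOpcode()/alignWideOpcode*() interface used by emitImpl:
//     WasmI32Popcnt::emit(&gen, dst, operand); // picks the narrowest encoding that fits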
struct WasmI32Popcnt : public Instruction {
static constexpr WasmOpcodeID opcodeID = wasm_i32_popcnt;
static constexpr size_t length = 3;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister operand)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, dst, operand);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister operand)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, dst, operand);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister operand)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, dst, operand))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, dst, operand))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, dst, operand);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& dst, VirtualRegister& operand)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Wide16 is currently disabled for the Windows CLoop; re-enable once
// https://bugs.webkit.org/show_bug.cgi?id=198283 is fixed.
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<WasmOpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(dst)
&& Fits<VirtualRegister, __size>::check(operand)
&& (__size == OpcodeSize::Wide16 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister operand)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, dst, operand)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide32));
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(dst));
gen->write(Fits<VirtualRegister, __size>::convert(operand));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**i32_popcnt"[2 - __sizeShiftAmount]);
dumper->dumpOperand("dst", m_dst, true);
dumper->dumpOperand("operand", m_operand, false);
}
WasmI32Popcnt(const uint8_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_operand(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[1]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmI32Popcnt(const uint16_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_operand(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[1]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmI32Popcnt(const uint32_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_operand(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[1]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static WasmI32Popcnt decode(const uint8_t* stream)
{
// stream points at the start of the instruction: the wide prefix byte, if any, then the opcode byte; the operands follow.
if (*stream == wasm_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == wasm_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide16>(value, func);
else
setDst<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setOperand(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setOperand<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setOperand<OpcodeSize::Wide16>(value, func);
else
setOperand<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setOperand(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
VirtualRegister m_dst;
VirtualRegister m_operand;
};
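// wasm_i32_eqz: stores 1 into `dst` if the 32-bit value in `operand` is zero,
// and 0 otherwise (Wasm i32.eqz).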
struct WasmI32Eqz : public Instruction {
static constexpr WasmOpcodeID opcodeID = wasm_i32_eqz;
static constexpr size_t length = 3;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister operand)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, dst, operand);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister operand)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, dst, operand);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister operand)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, dst, operand))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, dst, operand))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, dst, operand);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& dst, VirtualRegister& operand)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Wide16 is currently disabled for the Windows CLoop; re-enable once
// https://bugs.webkit.org/show_bug.cgi?id=198283 is fixed.
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<WasmOpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(dst)
&& Fits<VirtualRegister, __size>::check(operand)
&& (__size == OpcodeSize::Wide16 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister operand)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, dst, operand)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide32));
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(dst));
gen->write(Fits<VirtualRegister, __size>::convert(operand));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**i32_eqz"[2 - __sizeShiftAmount]);
dumper->dumpOperand("dst", m_dst, true);
dumper->dumpOperand("operand", m_operand, false);
}
WasmI32Eqz(const uint8_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_operand(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[1]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmI32Eqz(const uint16_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_operand(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[1]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmI32Eqz(const uint32_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_operand(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[1]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static WasmI32Eqz decode(const uint8_t* stream)
{
// stream points at the start of the instruction: the wide prefix byte, if any, then the opcode byte; the operands follow.
if (*stream == wasm_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == wasm_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide16>(value, func);
else
setDst<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setOperand(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setOperand<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setOperand<OpcodeSize::Wide16>(value, func);
else
setOperand<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setOperand(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
VirtualRegister m_dst;
VirtualRegister m_operand;
};
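// wasm_i64_add: 64-bit two's-complement addition, dst = lhs + rhs, wrapping on
// overflow (Wasm i64.add). The binary ops from here on reuse the same
// narrow/wide16/wide32 machinery as the unary ops above, with a third operand
// slot.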
struct WasmI64Add : public Instruction {
static constexpr WasmOpcodeID opcodeID = wasm_i64_add;
static constexpr size_t length = 4;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, dst, lhs, rhs);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, dst, lhs, rhs);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, dst, lhs, rhs))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, dst, lhs, rhs))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, dst, lhs, rhs);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& dst, VirtualRegister& lhs, VirtualRegister& rhs)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Wide16 is currently disabled for the Windows CLoop; re-enable once
// https://bugs.webkit.org/show_bug.cgi?id=198283 is fixed.
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<WasmOpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(dst)
&& Fits<VirtualRegister, __size>::check(lhs)
&& Fits<VirtualRegister, __size>::check(rhs)
&& (__size == OpcodeSize::Wide16 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, dst, lhs, rhs)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide32));
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(dst));
gen->write(Fits<VirtualRegister, __size>::convert(lhs));
gen->write(Fits<VirtualRegister, __size>::convert(rhs));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**i64_add"[2 - __sizeShiftAmount]);
dumper->dumpOperand("dst", m_dst, true);
dumper->dumpOperand("lhs", m_lhs, false);
dumper->dumpOperand("rhs", m_rhs, false);
}
WasmI64Add(const uint8_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_lhs(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[1]))
, m_rhs(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmI64Add(const uint16_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_lhs(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[1]))
, m_rhs(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmI64Add(const uint32_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_lhs(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[1]))
, m_rhs(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static WasmI64Add decode(const uint8_t* stream)
{
// stream points at the start of the instruction: the wide prefix byte, if any, then the opcode byte; the operands follow.
if (*stream == wasm_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == wasm_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide16>(value, func);
else
setDst<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setLhs(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setLhs<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setLhs<OpcodeSize::Wide16>(value, func);
else
setLhs<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setLhs(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setRhs(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setRhs<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setRhs<OpcodeSize::Wide16>(value, func);
else
setRhs<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setRhs(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 2 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
VirtualRegister m_dst;
VirtualRegister m_lhs;
VirtualRegister m_rhs;
};
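// wasm_i64_sub: 64-bit wrapping subtraction, dst = lhs - rhs (Wasm i64.sub).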
struct WasmI64Sub : public Instruction {
static constexpr WasmOpcodeID opcodeID = wasm_i64_sub;
static constexpr size_t length = 4;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, dst, lhs, rhs);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, dst, lhs, rhs);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, dst, lhs, rhs))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, dst, lhs, rhs))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, dst, lhs, rhs);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& dst, VirtualRegister& lhs, VirtualRegister& rhs)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Wide16 is currently disabled for the Windows CLoop; re-enable once
// https://bugs.webkit.org/show_bug.cgi?id=198283 is fixed.
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<WasmOpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(dst)
&& Fits<VirtualRegister, __size>::check(lhs)
&& Fits<VirtualRegister, __size>::check(rhs)
&& (__size == OpcodeSize::Wide16 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, dst, lhs, rhs)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide32));
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(dst));
gen->write(Fits<VirtualRegister, __size>::convert(lhs));
gen->write(Fits<VirtualRegister, __size>::convert(rhs));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**i64_sub"[2 - __sizeShiftAmount]);
dumper->dumpOperand("dst", m_dst, true);
dumper->dumpOperand("lhs", m_lhs, false);
dumper->dumpOperand("rhs", m_rhs, false);
}
WasmI64Sub(const uint8_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_lhs(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[1]))
, m_rhs(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmI64Sub(const uint16_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_lhs(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[1]))
, m_rhs(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmI64Sub(const uint32_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_lhs(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[1]))
, m_rhs(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static WasmI64Sub decode(const uint8_t* stream)
{
// stream points at the start of the instruction: the wide prefix byte, if any, then the opcode byte; the operands follow.
if (*stream == wasm_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == wasm_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide16>(value, func);
else
setDst<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setLhs(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setLhs<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setLhs<OpcodeSize::Wide16>(value, func);
else
setLhs<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setLhs(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setRhs(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setRhs<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setRhs<OpcodeSize::Wide16>(value, func);
else
setRhs<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setRhs(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 2 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
VirtualRegister m_dst;
VirtualRegister m_lhs;
VirtualRegister m_rhs;
};
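// wasm_i64_mul: 64-bit wrapping multiplication, dst = lhs * rhs (Wasm i64.mul).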
struct WasmI64Mul : public Instruction {
static constexpr WasmOpcodeID opcodeID = wasm_i64_mul;
static constexpr size_t length = 4;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, dst, lhs, rhs);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, dst, lhs, rhs);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, dst, lhs, rhs))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, dst, lhs, rhs))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, dst, lhs, rhs);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& dst, VirtualRegister& lhs, VirtualRegister& rhs)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Wide16 is currently disabled for the Windows CLoop; re-enable once
// https://bugs.webkit.org/show_bug.cgi?id=198283 is fixed.
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<WasmOpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(dst)
&& Fits<VirtualRegister, __size>::check(lhs)
&& Fits<VirtualRegister, __size>::check(rhs)
&& (__size == OpcodeSize::Wide16 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, dst, lhs, rhs)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide32));
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(dst));
gen->write(Fits<VirtualRegister, __size>::convert(lhs));
gen->write(Fits<VirtualRegister, __size>::convert(rhs));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**i64_mul"[2 - __sizeShiftAmount]);
dumper->dumpOperand("dst", m_dst, true);
dumper->dumpOperand("lhs", m_lhs, false);
dumper->dumpOperand("rhs", m_rhs, false);
}
WasmI64Mul(const uint8_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_lhs(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[1]))
, m_rhs(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmI64Mul(const uint16_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_lhs(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[1]))
, m_rhs(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmI64Mul(const uint32_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_lhs(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[1]))
, m_rhs(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static WasmI64Mul decode(const uint8_t* stream)
{
// stream points at the start of the instruction: the wide prefix byte, if any, then the opcode byte; the operands follow.
if (*stream == wasm_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == wasm_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide16>(value, func);
else
setDst<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setLhs(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setLhs<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setLhs<OpcodeSize::Wide16>(value, func);
else
setLhs<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setLhs(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setRhs(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setRhs<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setRhs<OpcodeSize::Wide16>(value, func);
else
setRhs<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setRhs(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 2 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
VirtualRegister m_dst;
VirtualRegister m_lhs;
VirtualRegister m_rhs;
};
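// wasm_i64_div_s: signed 64-bit division truncated toward zero, dst = lhs / rhs.
// Per the Wasm spec, i64.div_s traps on a zero divisor and on INT64_MIN / -1;
// those checks are the executing interpreter's responsibility, not this
// encoding struct's.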
struct WasmI64DivS : public Instruction {
static constexpr WasmOpcodeID opcodeID = wasm_i64_div_s;
static constexpr size_t length = 4;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, dst, lhs, rhs);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, dst, lhs, rhs);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, dst, lhs, rhs))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, dst, lhs, rhs))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, dst, lhs, rhs);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& dst, VirtualRegister& lhs, VirtualRegister& rhs)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Wide16 is currently disabled for the Windows CLoop; re-enable once
// https://bugs.webkit.org/show_bug.cgi?id=198283 is fixed.
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<WasmOpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(dst)
&& Fits<VirtualRegister, __size>::check(lhs)
&& Fits<VirtualRegister, __size>::check(rhs)
&& (__size == OpcodeSize::Wide16 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, dst, lhs, rhs)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide32));
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(dst));
gen->write(Fits<VirtualRegister, __size>::convert(lhs));
gen->write(Fits<VirtualRegister, __size>::convert(rhs));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**i64_div_s"[2 - __sizeShiftAmount]);
dumper->dumpOperand("dst", m_dst, true);
dumper->dumpOperand("lhs", m_lhs, false);
dumper->dumpOperand("rhs", m_rhs, false);
}
WasmI64DivS(const uint8_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_lhs(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[1]))
, m_rhs(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmI64DivS(const uint16_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_lhs(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[1]))
, m_rhs(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmI64DivS(const uint32_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_lhs(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[1]))
, m_rhs(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static WasmI64DivS decode(const uint8_t* stream)
{
// stream points at the start of the instruction: the wide prefix byte, if any, then the opcode byte; the operands follow.
if (*stream == wasm_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == wasm_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide16>(value, func);
else
setDst<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setLhs(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setLhs<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setLhs<OpcodeSize::Wide16>(value, func);
else
setLhs<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setLhs(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setRhs(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setRhs<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setRhs<OpcodeSize::Wide16>(value, func);
else
setRhs<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setRhs(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 2 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
VirtualRegister m_dst;
VirtualRegister m_lhs;
VirtualRegister m_rhs;
};
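// wasm_i64_div_u: unsigned 64-bit division, dst = lhs / rhs; per the Wasm spec,
// i64.div_u traps only on a zero divisor.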
struct WasmI64DivU : public Instruction {
static constexpr WasmOpcodeID opcodeID = wasm_i64_div_u;
static constexpr size_t length = 4;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, dst, lhs, rhs);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, dst, lhs, rhs);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, dst, lhs, rhs))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, dst, lhs, rhs))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, dst, lhs, rhs);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& dst, VirtualRegister& lhs, VirtualRegister& rhs)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Wide16 is currently disabled for the Windows CLoop; re-enable once
// https://bugs.webkit.org/show_bug.cgi?id=198283 is fixed.
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<WasmOpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(dst)
&& Fits<VirtualRegister, __size>::check(lhs)
&& Fits<VirtualRegister, __size>::check(rhs)
&& (__size == OpcodeSize::Wide16 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, dst, lhs, rhs)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide32));
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(dst));
gen->write(Fits<VirtualRegister, __size>::convert(lhs));
gen->write(Fits<VirtualRegister, __size>::convert(rhs));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**i64_div_u"[2 - __sizeShiftAmount]);
dumper->dumpOperand("dst", m_dst, true);
dumper->dumpOperand("lhs", m_lhs, false);
dumper->dumpOperand("rhs", m_rhs, false);
}
WasmI64DivU(const uint8_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_lhs(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[1]))
, m_rhs(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmI64DivU(const uint16_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_lhs(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[1]))
, m_rhs(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmI64DivU(const uint32_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_lhs(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[1]))
, m_rhs(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static WasmI64DivU decode(const uint8_t* stream)
{
// stream points at the start of the instruction: the wide prefix byte, if any, then the opcode byte; the operands follow.
if (*stream == wasm_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == wasm_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide16>(value, func);
else
setDst<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setLhs(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setLhs<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setLhs<OpcodeSize::Wide16>(value, func);
else
setLhs<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setLhs(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setRhs(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setRhs<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setRhs<OpcodeSize::Wide16>(value, func);
else
setRhs<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setRhs(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 2 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
VirtualRegister m_dst;
VirtualRegister m_lhs;
VirtualRegister m_rhs;
};
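// wasm_i64_rem_s: signed 64-bit remainder taking the sign of the dividend.
// Per the Wasm spec, a zero divisor traps, while INT64_MIN % -1 yields 0
// rather than trapping.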
struct WasmI64RemS : public Instruction {
static constexpr WasmOpcodeID opcodeID = wasm_i64_rem_s;
static constexpr size_t length = 4;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, dst, lhs, rhs);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, dst, lhs, rhs);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, dst, lhs, rhs))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, dst, lhs, rhs))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, dst, lhs, rhs);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& dst, VirtualRegister& lhs, VirtualRegister& rhs)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Wide16 is currently disabled for the Windows CLoop; re-enable once
// https://bugs.webkit.org/show_bug.cgi?id=198283 is fixed.
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<WasmOpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(dst)
&& Fits<VirtualRegister, __size>::check(lhs)
&& Fits<VirtualRegister, __size>::check(rhs)
&& (__size == OpcodeSize::Wide16 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, dst, lhs, rhs)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide32));
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(dst));
gen->write(Fits<VirtualRegister, __size>::convert(lhs));
gen->write(Fits<VirtualRegister, __size>::convert(rhs));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**i64_rem_s"[2 - __sizeShiftAmount]);
dumper->dumpOperand("dst", m_dst, true);
dumper->dumpOperand("lhs", m_lhs, false);
dumper->dumpOperand("rhs", m_rhs, false);
}
WasmI64RemS(const uint8_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_lhs(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[1]))
, m_rhs(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmI64RemS(const uint16_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_lhs(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[1]))
, m_rhs(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmI64RemS(const uint32_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_lhs(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[1]))
, m_rhs(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static WasmI64RemS decode(const uint8_t* stream)
{
// stream points at the start of the instruction: the wide prefix byte, if any, then the opcode byte; the operands follow.
if (*stream == wasm_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == wasm_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide16>(value, func);
else
setDst<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setLhs(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setLhs<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setLhs<OpcodeSize::Wide16>(value, func);
else
setLhs<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setLhs(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setRhs(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setRhs<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setRhs<OpcodeSize::Wide16>(value, func);
else
setRhs<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setRhs(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 2 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
VirtualRegister m_dst;
VirtualRegister m_lhs;
VirtualRegister m_rhs;
};
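// wasm_i64_rem_u: unsigned 64-bit remainder, dst = lhs % rhs; traps on a zero
// divisor (Wasm i64.rem_u).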
struct WasmI64RemU : public Instruction {
static constexpr WasmOpcodeID opcodeID = wasm_i64_rem_u;
static constexpr size_t length = 4;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, dst, lhs, rhs);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, dst, lhs, rhs);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, dst, lhs, rhs))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, dst, lhs, rhs))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, dst, lhs, rhs);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& dst, VirtualRegister& lhs, VirtualRegister& rhs)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Wide16 is currently disabled for the Windows CLoop; re-enable once
// https://bugs.webkit.org/show_bug.cgi?id=198283 is fixed.
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<WasmOpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(dst)
&& Fits<VirtualRegister, __size>::check(lhs)
&& Fits<VirtualRegister, __size>::check(rhs)
&& (__size == OpcodeSize::Wide16 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, dst, lhs, rhs)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide32));
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(dst));
gen->write(Fits<VirtualRegister, __size>::convert(lhs));
gen->write(Fits<VirtualRegister, __size>::convert(rhs));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**i64_rem_u"[2 - __sizeShiftAmount]);
dumper->dumpOperand("dst", m_dst, true);
dumper->dumpOperand("lhs", m_lhs, false);
dumper->dumpOperand("rhs", m_rhs, false);
}
WasmI64RemU(const uint8_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_lhs(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[1]))
, m_rhs(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[2]))
{
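        // The stream points one byte past the opcode, so the byte immediately
        // before the first operand must be this struct's opcode.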
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmI64RemU(const uint16_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_lhs(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[1]))
, m_rhs(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmI64RemU(const uint32_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_lhs(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[1]))
, m_rhs(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static WasmI64RemU decode(const uint8_t* stream)
{
        // decode() receives a pointer to the (possibly prefixed) opcode byte and
        // steps past the prefix and opcode so the constructor sees the first operand.
if (*stream == wasm_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == wasm_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide16>(value, func);
else
setDst<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setLhs(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setLhs<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setLhs<OpcodeSize::Wide16>(value, func);
else
setLhs<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setLhs(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setRhs(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setRhs<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setRhs<OpcodeSize::Wide16>(value, func);
else
setRhs<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setRhs(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 2 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
VirtualRegister m_dst;
VirtualRegister m_lhs;
VirtualRegister m_rhs;
};
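// A minimal decoding sketch (illustrative only; not part of the generated
// interface). A narrow wasm instruction is laid out as [opcode][dst][lhs][rhs],
// one byte each; wide16/wide32 encodings insert alignment padding, prepend a
// narrow wasm_wide16/wasm_wide32 prefix byte, and widen each operand to 2 or 4
// bytes. decode() is handed a pointer to the prefix-or-opcode byte:
//
//     const uint8_t* pc = /* ... somewhere in the instruction stream ... */;
//     auto insn = WasmI64RemU::decode(pc); // picks the width from *pc
//     VirtualRegister dst = insn.m_dst;    // operands are materialized copies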
struct WasmI64And : public Instruction {
static constexpr WasmOpcodeID opcodeID = wasm_i64_and;
static constexpr size_t length = 4;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, dst, lhs, rhs);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, dst, lhs, rhs);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, dst, lhs, rhs))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, dst, lhs, rhs))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, dst, lhs, rhs);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& dst, VirtualRegister& lhs, VirtualRegister& rhs)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<WasmOpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(dst)
&& Fits<VirtualRegister, __size>::check(lhs)
&& Fits<VirtualRegister, __size>::check(rhs)
&& (__size == OpcodeSize::Wide16 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, dst, lhs, rhs)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide32));
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(dst));
gen->write(Fits<VirtualRegister, __size>::convert(lhs));
gen->write(Fits<VirtualRegister, __size>::convert(rhs));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**i64_and"[2 - __sizeShiftAmount]);
dumper->dumpOperand("dst", m_dst, true);
dumper->dumpOperand("lhs", m_lhs, false);
dumper->dumpOperand("rhs", m_rhs, false);
}
WasmI64And(const uint8_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_lhs(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[1]))
, m_rhs(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmI64And(const uint16_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_lhs(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[1]))
, m_rhs(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmI64And(const uint32_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_lhs(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[1]))
, m_rhs(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static WasmI64And decode(const uint8_t* stream)
{
        // decode() receives a pointer to the (possibly prefixed) opcode byte and
        // steps past the prefix and opcode so the constructor sees the first operand.
if (*stream == wasm_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == wasm_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide16>(value, func);
else
setDst<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setLhs(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setLhs<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setLhs<OpcodeSize::Wide16>(value, func);
else
setLhs<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setLhs(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setRhs(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setRhs<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setRhs<OpcodeSize::Wide16>(value, func);
else
setRhs<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setRhs(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 2 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
VirtualRegister m_dst;
VirtualRegister m_lhs;
VirtualRegister m_rhs;
};
struct WasmI64Or : public Instruction {
static constexpr WasmOpcodeID opcodeID = wasm_i64_or;
static constexpr size_t length = 4;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, dst, lhs, rhs);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, dst, lhs, rhs);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, dst, lhs, rhs))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, dst, lhs, rhs))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, dst, lhs, rhs);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& dst, VirtualRegister& lhs, VirtualRegister& rhs)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<WasmOpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(dst)
&& Fits<VirtualRegister, __size>::check(lhs)
&& Fits<VirtualRegister, __size>::check(rhs)
&& (__size == OpcodeSize::Wide16 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, dst, lhs, rhs)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide32));
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(dst));
gen->write(Fits<VirtualRegister, __size>::convert(lhs));
gen->write(Fits<VirtualRegister, __size>::convert(rhs));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**i64_or"[2 - __sizeShiftAmount]);
dumper->dumpOperand("dst", m_dst, true);
dumper->dumpOperand("lhs", m_lhs, false);
dumper->dumpOperand("rhs", m_rhs, false);
}
WasmI64Or(const uint8_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_lhs(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[1]))
, m_rhs(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmI64Or(const uint16_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_lhs(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[1]))
, m_rhs(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmI64Or(const uint32_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_lhs(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[1]))
, m_rhs(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static WasmI64Or decode(const uint8_t* stream)
{
        // decode() receives a pointer to the (possibly prefixed) opcode byte and
        // steps past the prefix and opcode so the constructor sees the first operand.
if (*stream == wasm_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == wasm_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide16>(value, func);
else
setDst<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setLhs(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setLhs<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setLhs<OpcodeSize::Wide16>(value, func);
else
setLhs<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setLhs(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setRhs(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setRhs<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setRhs<OpcodeSize::Wide16>(value, func);
else
setRhs<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setRhs(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 2 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
VirtualRegister m_dst;
VirtualRegister m_lhs;
VirtualRegister m_rhs;
};
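// Sketch of the operand-rewrite hook shared by the setDst/setLhs/setRhs members
// above (the call site below is assumed, not generated): the instruction is
// patched in place rather than re-encoded, so if the replacement register does
// not fit the already-emitted width, the functor must supply a value that does:
//
//     insn.setRhs(replacement, [&]() -> VirtualRegister {
//         return narrowFallback; // hypothetical register known to fit
//     });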
struct WasmI64Xor : public Instruction {
static constexpr WasmOpcodeID opcodeID = wasm_i64_xor;
static constexpr size_t length = 4;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, dst, lhs, rhs);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, dst, lhs, rhs);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, dst, lhs, rhs))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, dst, lhs, rhs))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, dst, lhs, rhs);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& dst, VirtualRegister& lhs, VirtualRegister& rhs)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<WasmOpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(dst)
&& Fits<VirtualRegister, __size>::check(lhs)
&& Fits<VirtualRegister, __size>::check(rhs)
&& (__size == OpcodeSize::Wide16 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, dst, lhs, rhs)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide32));
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(dst));
gen->write(Fits<VirtualRegister, __size>::convert(lhs));
gen->write(Fits<VirtualRegister, __size>::convert(rhs));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**i64_xor"[2 - __sizeShiftAmount]);
dumper->dumpOperand("dst", m_dst, true);
dumper->dumpOperand("lhs", m_lhs, false);
dumper->dumpOperand("rhs", m_rhs, false);
}
WasmI64Xor(const uint8_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_lhs(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[1]))
, m_rhs(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmI64Xor(const uint16_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_lhs(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[1]))
, m_rhs(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmI64Xor(const uint32_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_lhs(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[1]))
, m_rhs(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static WasmI64Xor decode(const uint8_t* stream)
{
        // decode() receives a pointer to the (possibly prefixed) opcode byte and
        // steps past the prefix and opcode so the constructor sees the first operand.
if (*stream == wasm_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == wasm_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide16>(value, func);
else
setDst<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setLhs(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setLhs<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setLhs<OpcodeSize::Wide16>(value, func);
else
setLhs<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setLhs(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setRhs(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setRhs<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setRhs<OpcodeSize::Wide16>(value, func);
else
setRhs<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setRhs(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 2 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
VirtualRegister m_dst;
VirtualRegister m_lhs;
VirtualRegister m_rhs;
};
struct WasmI64Shl : public Instruction {
static constexpr WasmOpcodeID opcodeID = wasm_i64_shl;
static constexpr size_t length = 4;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, dst, lhs, rhs);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, dst, lhs, rhs);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, dst, lhs, rhs))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, dst, lhs, rhs))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, dst, lhs, rhs);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& dst, VirtualRegister& lhs, VirtualRegister& rhs)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<WasmOpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(dst)
&& Fits<VirtualRegister, __size>::check(lhs)
&& Fits<VirtualRegister, __size>::check(rhs)
&& (__size == OpcodeSize::Wide16 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, dst, lhs, rhs)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide32));
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(dst));
gen->write(Fits<VirtualRegister, __size>::convert(lhs));
gen->write(Fits<VirtualRegister, __size>::convert(rhs));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**i64_shl"[2 - __sizeShiftAmount]);
dumper->dumpOperand("dst", m_dst, true);
dumper->dumpOperand("lhs", m_lhs, false);
dumper->dumpOperand("rhs", m_rhs, false);
}
WasmI64Shl(const uint8_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_lhs(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[1]))
, m_rhs(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmI64Shl(const uint16_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_lhs(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[1]))
, m_rhs(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmI64Shl(const uint32_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_lhs(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[1]))
, m_rhs(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static WasmI64Shl decode(const uint8_t* stream)
{
        // decode() receives a pointer to the (possibly prefixed) opcode byte and
        // steps past the prefix and opcode so the constructor sees the first operand.
if (*stream == wasm_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == wasm_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide16>(value, func);
else
setDst<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setLhs(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setLhs<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setLhs<OpcodeSize::Wide16>(value, func);
else
setLhs<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setLhs(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setRhs(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setRhs<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setRhs<OpcodeSize::Wide16>(value, func);
else
setRhs<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setRhs(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 2 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
VirtualRegister m_dst;
VirtualRegister m_lhs;
VirtualRegister m_rhs;
};
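// Emission-width recap (a sketch derived from the overloads above):
// emitWithSmallestSizeRequirement() attempts Narrow, then Wide16, and finally
// Wide32, asserting only on the last attempt since every operand must fit in
// 32 bits. A caller can skip the smaller widths by raising the starting size:
//
//     // Assumes `gen` exposes the generator interface used by emitImpl above.
//     WasmI64Shl::emitWithSmallestSizeRequirement<OpcodeSize::Wide16>(gen, dst, lhs, rhs);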
struct WasmI64ShrU : public Instruction {
static constexpr WasmOpcodeID opcodeID = wasm_i64_shr_u;
static constexpr size_t length = 4;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, dst, lhs, rhs);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, dst, lhs, rhs);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, dst, lhs, rhs))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, dst, lhs, rhs))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, dst, lhs, rhs);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& dst, VirtualRegister& lhs, VirtualRegister& rhs)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<WasmOpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(dst)
&& Fits<VirtualRegister, __size>::check(lhs)
&& Fits<VirtualRegister, __size>::check(rhs)
&& (__size == OpcodeSize::Wide16 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, dst, lhs, rhs)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide32));
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(dst));
gen->write(Fits<VirtualRegister, __size>::convert(lhs));
gen->write(Fits<VirtualRegister, __size>::convert(rhs));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**i64_shr_u"[2 - __sizeShiftAmount]);
dumper->dumpOperand("dst", m_dst, true);
dumper->dumpOperand("lhs", m_lhs, false);
dumper->dumpOperand("rhs", m_rhs, false);
}
WasmI64ShrU(const uint8_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_lhs(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[1]))
, m_rhs(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmI64ShrU(const uint16_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_lhs(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[1]))
, m_rhs(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmI64ShrU(const uint32_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_lhs(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[1]))
, m_rhs(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static WasmI64ShrU decode(const uint8_t* stream)
{
        // decode() receives a pointer to the (possibly prefixed) opcode byte and
        // steps past the prefix and opcode so the constructor sees the first operand.
if (*stream == wasm_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == wasm_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide16>(value, func);
else
setDst<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setLhs(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setLhs<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setLhs<OpcodeSize::Wide16>(value, func);
else
setLhs<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setLhs(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setRhs(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setRhs<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setRhs<OpcodeSize::Wide16>(value, func);
else
setRhs<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setRhs(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 2 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
VirtualRegister m_dst;
VirtualRegister m_lhs;
VirtualRegister m_rhs;
};
struct WasmI64ShrS : public Instruction {
static constexpr WasmOpcodeID opcodeID = wasm_i64_shr_s;
static constexpr size_t length = 4;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, dst, lhs, rhs);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, dst, lhs, rhs);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, dst, lhs, rhs))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, dst, lhs, rhs))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, dst, lhs, rhs);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& dst, VirtualRegister& lhs, VirtualRegister& rhs)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<WasmOpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(dst)
&& Fits<VirtualRegister, __size>::check(lhs)
&& Fits<VirtualRegister, __size>::check(rhs)
&& (__size == OpcodeSize::Wide16 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, dst, lhs, rhs)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide32));
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(dst));
gen->write(Fits<VirtualRegister, __size>::convert(lhs));
gen->write(Fits<VirtualRegister, __size>::convert(rhs));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**i64_shr_s"[2 - __sizeShiftAmount]);
dumper->dumpOperand("dst", m_dst, true);
dumper->dumpOperand("lhs", m_lhs, false);
dumper->dumpOperand("rhs", m_rhs, false);
}
WasmI64ShrS(const uint8_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_lhs(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[1]))
, m_rhs(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmI64ShrS(const uint16_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_lhs(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[1]))
, m_rhs(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmI64ShrS(const uint32_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_lhs(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[1]))
, m_rhs(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static WasmI64ShrS decode(const uint8_t* stream)
{
        // decode() receives a pointer to the (possibly prefixed) opcode byte and
        // steps past the prefix and opcode so the constructor sees the first operand.
if (*stream == wasm_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == wasm_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide16>(value, func);
else
setDst<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setLhs(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setLhs<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setLhs<OpcodeSize::Wide16>(value, func);
else
setLhs<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setLhs(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setRhs(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setRhs<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setRhs<OpcodeSize::Wide16>(value, func);
else
setRhs<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setRhs(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 2 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
VirtualRegister m_dst;
VirtualRegister m_lhs;
VirtualRegister m_rhs;
};
struct WasmI64Rotr : public Instruction {
static constexpr WasmOpcodeID opcodeID = wasm_i64_rotr;
static constexpr size_t length = 4;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, dst, lhs, rhs);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, dst, lhs, rhs);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, dst, lhs, rhs))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, dst, lhs, rhs))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, dst, lhs, rhs);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& dst, VirtualRegister& lhs, VirtualRegister& rhs)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<WasmOpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(dst)
&& Fits<VirtualRegister, __size>::check(lhs)
&& Fits<VirtualRegister, __size>::check(rhs)
&& (__size == OpcodeSize::Wide16 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, dst, lhs, rhs)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide32));
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(dst));
gen->write(Fits<VirtualRegister, __size>::convert(lhs));
gen->write(Fits<VirtualRegister, __size>::convert(rhs));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**i64_rotr"[2 - __sizeShiftAmount]);
dumper->dumpOperand("dst", m_dst, true);
dumper->dumpOperand("lhs", m_lhs, false);
dumper->dumpOperand("rhs", m_rhs, false);
}
WasmI64Rotr(const uint8_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_lhs(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[1]))
, m_rhs(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmI64Rotr(const uint16_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_lhs(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[1]))
, m_rhs(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmI64Rotr(const uint32_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_lhs(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[1]))
, m_rhs(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static WasmI64Rotr decode(const uint8_t* stream)
{
        // decode() receives a pointer to the (possibly prefixed) opcode byte and
        // steps past the prefix and opcode so the constructor sees the first operand.
if (*stream == wasm_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == wasm_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide16>(value, func);
else
setDst<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setLhs(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setLhs<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setLhs<OpcodeSize::Wide16>(value, func);
else
setLhs<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setLhs(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setRhs(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setRhs<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setRhs<OpcodeSize::Wide16>(value, func);
else
setRhs<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setRhs(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 2 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
VirtualRegister m_dst;
VirtualRegister m_lhs;
VirtualRegister m_rhs;
};
struct WasmI64Rotl : public Instruction {
static constexpr WasmOpcodeID opcodeID = wasm_i64_rotl;
static constexpr size_t length = 4;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, dst, lhs, rhs);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, dst, lhs, rhs);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, dst, lhs, rhs))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, dst, lhs, rhs))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, dst, lhs, rhs);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& dst, VirtualRegister& lhs, VirtualRegister& rhs)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<WasmOpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(dst)
&& Fits<VirtualRegister, __size>::check(lhs)
&& Fits<VirtualRegister, __size>::check(rhs)
&& (__size == OpcodeSize::Wide16 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, dst, lhs, rhs)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide32));
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(dst));
gen->write(Fits<VirtualRegister, __size>::convert(lhs));
gen->write(Fits<VirtualRegister, __size>::convert(rhs));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**i64_rotl"[2 - __sizeShiftAmount]);
dumper->dumpOperand("dst", m_dst, true);
dumper->dumpOperand("lhs", m_lhs, false);
dumper->dumpOperand("rhs", m_rhs, false);
}
WasmI64Rotl(const uint8_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_lhs(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[1]))
, m_rhs(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmI64Rotl(const uint16_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_lhs(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[1]))
, m_rhs(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmI64Rotl(const uint32_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_lhs(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[1]))
, m_rhs(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static WasmI64Rotl decode(const uint8_t* stream)
{
        // decode() receives a pointer to the (possibly prefixed) opcode byte and
        // steps past the prefix and opcode so the constructor sees the first operand.
if (*stream == wasm_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == wasm_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide16>(value, func);
else
setDst<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setLhs(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setLhs<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setLhs<OpcodeSize::Wide16>(value, func);
else
setLhs<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setLhs(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setRhs(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setRhs<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setRhs<OpcodeSize::Wide16>(value, func);
else
setRhs<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setRhs(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 2 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
VirtualRegister m_dst;
VirtualRegister m_lhs;
VirtualRegister m_rhs;
};
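// The i64 comparison opcodes that follow (WasmI64Eq, WasmI64Ne, and the ordered
// comparisons) are stamped out from the same template as WasmI64Rotl above: one
// destination and two source registers with identical emit/decode/setter
// machinery, differing only in opcodeID and the mnemonic handed to the dumper.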
struct WasmI64Eq : public Instruction {
static constexpr WasmOpcodeID opcodeID = wasm_i64_eq;
static constexpr size_t length = 4;
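    // Encoded as [wide prefix?][opcode][dst][lhs][rhs]; length counts the
    // opcode plus the three operands in units of the instruction's width.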
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, dst, lhs, rhs);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, dst, lhs, rhs);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
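    // Tries each encoding from __size upward: narrow first, then wide16, and
    // finally wide32, which must succeed (hence Assert on the last attempt).
    // The first two attempts pass NoAssert so a failed fit falls through
    // silently to the next width.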
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, dst, lhs, rhs))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, dst, lhs, rhs))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, dst, lhs, rhs);
}
private:
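    // checkImpl() answers whether the opcode and all three operands are
    // representable at __size, and whether the required wide prefix byte is
    // itself encodable narrowly. Wide16 is rejected outright on the Windows
    // CLoop (see the bug link below), so those builds skip straight to wide32.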
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& dst, VirtualRegister& lhs, VirtualRegister& rhs)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<WasmOpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(dst)
&& Fits<VirtualRegister, __size>::check(lhs)
&& Fits<VirtualRegister, __size>::check(rhs)
&& (__size == OpcodeSize::Wide16 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide32) : true);
}
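    // emitImpl() aligns the stream for wide encodings before checking the fit,
    // then writes the optional wide prefix, the opcode byte, and the three
    // operands at the chosen width. A false return leaves the caller to retry
    // at the next wider size.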
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, dst, lhs, rhs)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide32));
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(dst));
gen->write(Fits<VirtualRegister, __size>::convert(lhs));
gen->write(Fits<VirtualRegister, __size>::convert(rhs));
return true;
}
return false;
}
public:
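    // __sizeShiftAmount is log2 of the operand width (0 = narrow, 1 = wide16,
    // 2 = wide32), so indexing "**i64_eq" from its tail prints the mnemonic
    // with one leading '*' per widening step: "i64_eq", "*i64_eq", "**i64_eq".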
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**i64_eq"[2 - __sizeShiftAmount]);
dumper->dumpOperand("dst", m_dst, true);
dumper->dumpOperand("lhs", m_lhs, false);
dumper->dumpOperand("rhs", m_rhs, false);
}
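    // Each constructor decodes the operands from a stream positioned at the
    // first operand; the assert checks that the byte immediately before it is
    // this opcode, which holds at every width because any wide prefix precedes
    // the opcode byte rather than following it.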
WasmI64Eq(const uint8_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_lhs(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[1]))
, m_rhs(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmI64Eq(const uint16_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_lhs(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[1]))
, m_rhs(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmI64Eq(const uint32_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_lhs(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[1]))
, m_rhs(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static WasmI64Eq decode(const uint8_t* stream)
{
        // The constructor receives a pointer to the first operand; the opcode and any wide prefix have already been skipped.
if (*stream == wasm_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == wasm_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide16>(value, func);
else
setDst<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setLhs(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setLhs<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setLhs<OpcodeSize::Wide16>(value, func);
else
setLhs<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setLhs(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setRhs(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setRhs<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setRhs<OpcodeSize::Wide16>(value, func);
else
setRhs<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setRhs(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 2 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
VirtualRegister m_dst;
VirtualRegister m_lhs;
VirtualRegister m_rhs;
};
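// Minimal usage sketch (hypothetical generator `gen` and stream cursor `pc`;
// not part of this header):
//
//     WasmI64Eq::emit(&gen, dst, lhs, rhs);   // narrowest encoding that fits
//     auto op = WasmI64Eq::decode(pc);        // pc -> prefix-or-opcode byte
//     // op.m_dst, op.m_lhs, op.m_rhs now hold the decoded registers.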
struct WasmI64Ne : public Instruction {
static constexpr WasmOpcodeID opcodeID = wasm_i64_ne;
static constexpr size_t length = 4;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, dst, lhs, rhs);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, dst, lhs, rhs);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, dst, lhs, rhs))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, dst, lhs, rhs))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, dst, lhs, rhs);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& dst, VirtualRegister& lhs, VirtualRegister& rhs)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<WasmOpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(dst)
&& Fits<VirtualRegister, __size>::check(lhs)
&& Fits<VirtualRegister, __size>::check(rhs)
&& (__size == OpcodeSize::Wide16 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, dst, lhs, rhs)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide32));
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(dst));
gen->write(Fits<VirtualRegister, __size>::convert(lhs));
gen->write(Fits<VirtualRegister, __size>::convert(rhs));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**i64_ne"[2 - __sizeShiftAmount]);
dumper->dumpOperand("dst", m_dst, true);
dumper->dumpOperand("lhs", m_lhs, false);
dumper->dumpOperand("rhs", m_rhs, false);
}
WasmI64Ne(const uint8_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_lhs(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[1]))
, m_rhs(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmI64Ne(const uint16_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_lhs(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[1]))
, m_rhs(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmI64Ne(const uint32_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_lhs(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[1]))
, m_rhs(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static WasmI64Ne decode(const uint8_t* stream)
{
        // The constructor receives a pointer to the first operand; the opcode and any wide prefix have already been skipped.
if (*stream == wasm_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == wasm_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide16>(value, func);
else
setDst<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setLhs(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setLhs<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setLhs<OpcodeSize::Wide16>(value, func);
else
setLhs<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setLhs(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setRhs(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setRhs<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setRhs<OpcodeSize::Wide16>(value, func);
else
setRhs<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setRhs(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 2 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
VirtualRegister m_dst;
VirtualRegister m_lhs;
VirtualRegister m_rhs;
};
struct WasmI64LtS : public Instruction {
static constexpr WasmOpcodeID opcodeID = wasm_i64_lt_s;
static constexpr size_t length = 4;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, dst, lhs, rhs);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, dst, lhs, rhs);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, dst, lhs, rhs))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, dst, lhs, rhs))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, dst, lhs, rhs);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& dst, VirtualRegister& lhs, VirtualRegister& rhs)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<WasmOpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(dst)
&& Fits<VirtualRegister, __size>::check(lhs)
&& Fits<VirtualRegister, __size>::check(rhs)
&& (__size == OpcodeSize::Wide16 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, dst, lhs, rhs)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide32));
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(dst));
gen->write(Fits<VirtualRegister, __size>::convert(lhs));
gen->write(Fits<VirtualRegister, __size>::convert(rhs));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**i64_lt_s"[2 - __sizeShiftAmount]);
dumper->dumpOperand("dst", m_dst, true);
dumper->dumpOperand("lhs", m_lhs, false);
dumper->dumpOperand("rhs", m_rhs, false);
}
WasmI64LtS(const uint8_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_lhs(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[1]))
, m_rhs(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmI64LtS(const uint16_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_lhs(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[1]))
, m_rhs(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmI64LtS(const uint32_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_lhs(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[1]))
, m_rhs(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static WasmI64LtS decode(const uint8_t* stream)
{
        // The constructor receives a pointer to the first operand; the opcode and any wide prefix have already been skipped.
if (*stream == wasm_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == wasm_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide16>(value, func);
else
setDst<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setLhs(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setLhs<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setLhs<OpcodeSize::Wide16>(value, func);
else
setLhs<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setLhs(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setRhs(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setRhs<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setRhs<OpcodeSize::Wide16>(value, func);
else
setRhs<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setRhs(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 2 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
VirtualRegister m_dst;
VirtualRegister m_lhs;
VirtualRegister m_rhs;
};
struct WasmI64LeS : public Instruction {
static constexpr WasmOpcodeID opcodeID = wasm_i64_le_s;
static constexpr size_t length = 4;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, dst, lhs, rhs);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, dst, lhs, rhs);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, dst, lhs, rhs))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, dst, lhs, rhs))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, dst, lhs, rhs);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& dst, VirtualRegister& lhs, VirtualRegister& rhs)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<WasmOpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(dst)
&& Fits<VirtualRegister, __size>::check(lhs)
&& Fits<VirtualRegister, __size>::check(rhs)
&& (__size == OpcodeSize::Wide16 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, dst, lhs, rhs)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide32));
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(dst));
gen->write(Fits<VirtualRegister, __size>::convert(lhs));
gen->write(Fits<VirtualRegister, __size>::convert(rhs));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**i64_le_s"[2 - __sizeShiftAmount]);
dumper->dumpOperand("dst", m_dst, true);
dumper->dumpOperand("lhs", m_lhs, false);
dumper->dumpOperand("rhs", m_rhs, false);
}
WasmI64LeS(const uint8_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_lhs(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[1]))
, m_rhs(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmI64LeS(const uint16_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_lhs(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[1]))
, m_rhs(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmI64LeS(const uint32_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_lhs(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[1]))
, m_rhs(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static WasmI64LeS decode(const uint8_t* stream)
{
        // The constructor receives a pointer to the first operand; the opcode and any wide prefix have already been skipped.
if (*stream == wasm_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == wasm_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide16>(value, func);
else
setDst<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setLhs(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setLhs<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setLhs<OpcodeSize::Wide16>(value, func);
else
setLhs<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setLhs(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setRhs(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setRhs<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setRhs<OpcodeSize::Wide16>(value, func);
else
setRhs<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setRhs(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 2 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
VirtualRegister m_dst;
VirtualRegister m_lhs;
VirtualRegister m_rhs;
};
struct WasmI64LtU : public Instruction {
static constexpr WasmOpcodeID opcodeID = wasm_i64_lt_u;
static constexpr size_t length = 4;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, dst, lhs, rhs);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, dst, lhs, rhs);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, dst, lhs, rhs))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, dst, lhs, rhs))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, dst, lhs, rhs);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& dst, VirtualRegister& lhs, VirtualRegister& rhs)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<WasmOpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(dst)
&& Fits<VirtualRegister, __size>::check(lhs)
&& Fits<VirtualRegister, __size>::check(rhs)
&& (__size == OpcodeSize::Wide16 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, dst, lhs, rhs)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide32));
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(dst));
gen->write(Fits<VirtualRegister, __size>::convert(lhs));
gen->write(Fits<VirtualRegister, __size>::convert(rhs));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**i64_lt_u"[2 - __sizeShiftAmount]);
dumper->dumpOperand("dst", m_dst, true);
dumper->dumpOperand("lhs", m_lhs, false);
dumper->dumpOperand("rhs", m_rhs, false);
}
WasmI64LtU(const uint8_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_lhs(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[1]))
, m_rhs(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmI64LtU(const uint16_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_lhs(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[1]))
, m_rhs(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmI64LtU(const uint32_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_lhs(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[1]))
, m_rhs(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static WasmI64LtU decode(const uint8_t* stream)
{
        // The constructor receives a pointer to the first operand; the opcode and any wide prefix have already been skipped.
if (*stream == wasm_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == wasm_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide16>(value, func);
else
setDst<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setLhs(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setLhs<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setLhs<OpcodeSize::Wide16>(value, func);
else
setLhs<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setLhs(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setRhs(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setRhs<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setRhs<OpcodeSize::Wide16>(value, func);
else
setRhs<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setRhs(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 2 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
VirtualRegister m_dst;
VirtualRegister m_lhs;
VirtualRegister m_rhs;
};
struct WasmI64LeU : public Instruction {
static constexpr WasmOpcodeID opcodeID = wasm_i64_le_u;
static constexpr size_t length = 4;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, dst, lhs, rhs);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, dst, lhs, rhs);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, dst, lhs, rhs))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, dst, lhs, rhs))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, dst, lhs, rhs);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& dst, VirtualRegister& lhs, VirtualRegister& rhs)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<WasmOpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(dst)
&& Fits<VirtualRegister, __size>::check(lhs)
&& Fits<VirtualRegister, __size>::check(rhs)
&& (__size == OpcodeSize::Wide16 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, dst, lhs, rhs)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide32));
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(dst));
gen->write(Fits<VirtualRegister, __size>::convert(lhs));
gen->write(Fits<VirtualRegister, __size>::convert(rhs));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**i64_le_u"[2 - __sizeShiftAmount]);
dumper->dumpOperand("dst", m_dst, true);
dumper->dumpOperand("lhs", m_lhs, false);
dumper->dumpOperand("rhs", m_rhs, false);
}
WasmI64LeU(const uint8_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_lhs(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[1]))
, m_rhs(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmI64LeU(const uint16_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_lhs(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[1]))
, m_rhs(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmI64LeU(const uint32_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_lhs(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[1]))
, m_rhs(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static WasmI64LeU decode(const uint8_t* stream)
{
        // The constructor receives a pointer to the first operand; the opcode and any wide prefix have already been skipped.
if (*stream == wasm_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == wasm_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide16>(value, func);
else
setDst<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setLhs(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setLhs<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setLhs<OpcodeSize::Wide16>(value, func);
else
setLhs<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setLhs(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setRhs(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setRhs<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setRhs<OpcodeSize::Wide16>(value, func);
else
setRhs<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setRhs(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 2 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
VirtualRegister m_dst;
VirtualRegister m_lhs;
VirtualRegister m_rhs;
};
struct WasmI64GtS : public Instruction {
static constexpr WasmOpcodeID opcodeID = wasm_i64_gt_s;
static constexpr size_t length = 4;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, dst, lhs, rhs);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, dst, lhs, rhs);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, dst, lhs, rhs))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, dst, lhs, rhs))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, dst, lhs, rhs);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& dst, VirtualRegister& lhs, VirtualRegister& rhs)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<WasmOpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(dst)
&& Fits<VirtualRegister, __size>::check(lhs)
&& Fits<VirtualRegister, __size>::check(rhs)
&& (__size == OpcodeSize::Wide16 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, dst, lhs, rhs)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide32));
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(dst));
gen->write(Fits<VirtualRegister, __size>::convert(lhs));
gen->write(Fits<VirtualRegister, __size>::convert(rhs));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**i64_gt_s"[2 - __sizeShiftAmount]);
dumper->dumpOperand("dst", m_dst, true);
dumper->dumpOperand("lhs", m_lhs, false);
dumper->dumpOperand("rhs", m_rhs, false);
}
WasmI64GtS(const uint8_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_lhs(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[1]))
, m_rhs(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmI64GtS(const uint16_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_lhs(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[1]))
, m_rhs(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmI64GtS(const uint32_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_lhs(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[1]))
, m_rhs(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static WasmI64GtS decode(const uint8_t* stream)
{
        // The constructor receives a pointer to the first operand; the opcode and any wide prefix have already been skipped.
if (*stream == wasm_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == wasm_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide16>(value, func);
else
setDst<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setLhs(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setLhs<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setLhs<OpcodeSize::Wide16>(value, func);
else
setLhs<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setLhs(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setRhs(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setRhs<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setRhs<OpcodeSize::Wide16>(value, func);
else
setRhs<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setRhs(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 2 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
VirtualRegister m_dst;
VirtualRegister m_lhs;
VirtualRegister m_rhs;
};
struct WasmI64GeS : public Instruction {
static constexpr WasmOpcodeID opcodeID = wasm_i64_ge_s;
static constexpr size_t length = 4;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, dst, lhs, rhs);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, dst, lhs, rhs);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, dst, lhs, rhs))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, dst, lhs, rhs))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, dst, lhs, rhs);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& dst, VirtualRegister& lhs, VirtualRegister& rhs)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<WasmOpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(dst)
&& Fits<VirtualRegister, __size>::check(lhs)
&& Fits<VirtualRegister, __size>::check(rhs)
&& (__size == OpcodeSize::Wide16 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, dst, lhs, rhs)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide32));
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(dst));
gen->write(Fits<VirtualRegister, __size>::convert(lhs));
gen->write(Fits<VirtualRegister, __size>::convert(rhs));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**i64_ge_s"[2 - __sizeShiftAmount]);
dumper->dumpOperand("dst", m_dst, true);
dumper->dumpOperand("lhs", m_lhs, false);
dumper->dumpOperand("rhs", m_rhs, false);
}
WasmI64GeS(const uint8_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_lhs(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[1]))
, m_rhs(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmI64GeS(const uint16_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_lhs(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[1]))
, m_rhs(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmI64GeS(const uint32_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_lhs(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[1]))
, m_rhs(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static WasmI64GeS decode(const uint8_t* stream)
{
        // The constructor receives a pointer to the first operand; the opcode and any wide prefix have already been skipped.
if (*stream == wasm_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == wasm_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide16>(value, func);
else
setDst<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setLhs(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setLhs<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setLhs<OpcodeSize::Wide16>(value, func);
else
setLhs<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setLhs(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setRhs(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setRhs<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setRhs<OpcodeSize::Wide16>(value, func);
else
setRhs<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setRhs(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 2 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
VirtualRegister m_dst;
VirtualRegister m_lhs;
VirtualRegister m_rhs;
};
struct WasmI64GtU : public Instruction {
static constexpr WasmOpcodeID opcodeID = wasm_i64_gt_u;
static constexpr size_t length = 4;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, dst, lhs, rhs);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, dst, lhs, rhs);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, dst, lhs, rhs))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, dst, lhs, rhs))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, dst, lhs, rhs);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& dst, VirtualRegister& lhs, VirtualRegister& rhs)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<WasmOpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(dst)
&& Fits<VirtualRegister, __size>::check(lhs)
&& Fits<VirtualRegister, __size>::check(rhs)
&& (__size == OpcodeSize::Wide16 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, dst, lhs, rhs)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide32));
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(dst));
gen->write(Fits<VirtualRegister, __size>::convert(lhs));
gen->write(Fits<VirtualRegister, __size>::convert(rhs));
return true;
}
return false;
}
public:
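    // __sizeShiftAmount is 0 (narrow), 1 (wide16) or 2 (wide32); indexing into
    // "**name" keeps one leading '*' per widening step in the dumped mnemonic.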
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**i64_gt_u"[2 - __sizeShiftAmount]);
dumper->dumpOperand("dst", m_dst, true);
dumper->dumpOperand("lhs", m_lhs, false);
dumper->dumpOperand("rhs", m_rhs, false);
}
WasmI64GtU(const uint8_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_lhs(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[1]))
, m_rhs(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmI64GtU(const uint16_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_lhs(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[1]))
, m_rhs(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmI64GtU(const uint32_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_lhs(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[1]))
, m_rhs(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static WasmI64GtU decode(const uint8_t* stream)
{
        // Pass the constructor a pointer to the first operand (the opcode and any wide prefix are skipped here).
if (*stream == wasm_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == wasm_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
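    // The set*() helpers rewrite a single operand of an instruction that is still
    // in the instruction stream: they compute the operand's address from `this`
    // (operand index * operand size, plus prefix padding and the opcode byte), so
    // they must be called on a struct overlaying the stream, not on a decoded
    // copy. When the new value does not fit the current encoding, the functor is
    // asked to produce a value that does.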
template<typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide16>(value, func);
else
setDst<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setLhs(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setLhs<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setLhs<OpcodeSize::Wide16>(value, func);
else
setLhs<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setLhs(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setRhs(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setRhs<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setRhs<OpcodeSize::Wide16>(value, func);
else
setRhs<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setRhs(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 2 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
VirtualRegister m_dst;
VirtualRegister m_lhs;
VirtualRegister m_rhs;
};
struct WasmI64GeU : public Instruction {
static constexpr WasmOpcodeID opcodeID = wasm_i64_ge_u;
static constexpr size_t length = 4;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, dst, lhs, rhs);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, dst, lhs, rhs);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, dst, lhs, rhs))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, dst, lhs, rhs))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, dst, lhs, rhs);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& dst, VirtualRegister& lhs, VirtualRegister& rhs)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<WasmOpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(dst)
&& Fits<VirtualRegister, __size>::check(lhs)
&& Fits<VirtualRegister, __size>::check(rhs)
&& (__size == OpcodeSize::Wide16 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, dst, lhs, rhs)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide32));
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(dst));
gen->write(Fits<VirtualRegister, __size>::convert(lhs));
gen->write(Fits<VirtualRegister, __size>::convert(rhs));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**i64_ge_u"[2 - __sizeShiftAmount]);
dumper->dumpOperand("dst", m_dst, true);
dumper->dumpOperand("lhs", m_lhs, false);
dumper->dumpOperand("rhs", m_rhs, false);
}
WasmI64GeU(const uint8_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_lhs(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[1]))
, m_rhs(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmI64GeU(const uint16_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_lhs(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[1]))
, m_rhs(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmI64GeU(const uint32_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_lhs(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[1]))
, m_rhs(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static WasmI64GeU decode(const uint8_t* stream)
{
        // Pass the constructor a pointer to the first operand (the opcode and any wide prefix are skipped here).
if (*stream == wasm_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == wasm_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide16>(value, func);
else
setDst<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setLhs(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setLhs<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setLhs<OpcodeSize::Wide16>(value, func);
else
setLhs<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setLhs(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setRhs(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setRhs<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setRhs<OpcodeSize::Wide16>(value, func);
else
setRhs<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setRhs(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 2 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
VirtualRegister m_dst;
VirtualRegister m_lhs;
VirtualRegister m_rhs;
};
struct WasmI64Clz : public Instruction {
static constexpr WasmOpcodeID opcodeID = wasm_i64_clz;
static constexpr size_t length = 3;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister operand)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, dst, operand);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister operand)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, dst, operand);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister operand)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, dst, operand))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, dst, operand))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, dst, operand);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& dst, VirtualRegister& operand)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<WasmOpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(dst)
&& Fits<VirtualRegister, __size>::check(operand)
&& (__size == OpcodeSize::Wide16 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister operand)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, dst, operand)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide32));
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(dst));
gen->write(Fits<VirtualRegister, __size>::convert(operand));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**i64_clz"[2 - __sizeShiftAmount]);
dumper->dumpOperand("dst", m_dst, true);
dumper->dumpOperand("operand", m_operand, false);
}
WasmI64Clz(const uint8_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_operand(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[1]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmI64Clz(const uint16_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_operand(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[1]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmI64Clz(const uint32_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_operand(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[1]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static WasmI64Clz decode(const uint8_t* stream)
{
        // Pass the constructor a pointer to the first operand (the opcode and any wide prefix are skipped here).
if (*stream == wasm_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == wasm_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide16>(value, func);
else
setDst<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setOperand(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setOperand<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setOperand<OpcodeSize::Wide16>(value, func);
else
setOperand<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setOperand(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
VirtualRegister m_dst;
VirtualRegister m_operand;
};
struct WasmI64Ctz : public Instruction {
static constexpr WasmOpcodeID opcodeID = wasm_i64_ctz;
static constexpr size_t length = 3;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister operand)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, dst, operand);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister operand)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, dst, operand);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister operand)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, dst, operand))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, dst, operand))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, dst, operand);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& dst, VirtualRegister& operand)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<WasmOpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(dst)
&& Fits<VirtualRegister, __size>::check(operand)
&& (__size == OpcodeSize::Wide16 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister operand)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, dst, operand)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide32));
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(dst));
gen->write(Fits<VirtualRegister, __size>::convert(operand));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**i64_ctz"[2 - __sizeShiftAmount]);
dumper->dumpOperand("dst", m_dst, true);
dumper->dumpOperand("operand", m_operand, false);
}
WasmI64Ctz(const uint8_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_operand(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[1]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmI64Ctz(const uint16_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_operand(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[1]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmI64Ctz(const uint32_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_operand(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[1]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static WasmI64Ctz decode(const uint8_t* stream)
{
        // Pass the constructor a pointer to the first operand (the opcode and any wide prefix are skipped here).
if (*stream == wasm_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == wasm_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide16>(value, func);
else
setDst<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setOperand(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setOperand<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setOperand<OpcodeSize::Wide16>(value, func);
else
setOperand<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setOperand(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
VirtualRegister m_dst;
VirtualRegister m_operand;
};
struct WasmI64Popcnt : public Instruction {
static constexpr WasmOpcodeID opcodeID = wasm_i64_popcnt;
static constexpr size_t length = 3;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister operand)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, dst, operand);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister operand)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, dst, operand);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister operand)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, dst, operand))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, dst, operand))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, dst, operand);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& dst, VirtualRegister& operand)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<WasmOpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(dst)
&& Fits<VirtualRegister, __size>::check(operand)
&& (__size == OpcodeSize::Wide16 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister operand)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, dst, operand)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide32));
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(dst));
gen->write(Fits<VirtualRegister, __size>::convert(operand));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**i64_popcnt"[2 - __sizeShiftAmount]);
dumper->dumpOperand("dst", m_dst, true);
dumper->dumpOperand("operand", m_operand, false);
}
WasmI64Popcnt(const uint8_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_operand(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[1]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmI64Popcnt(const uint16_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_operand(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[1]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmI64Popcnt(const uint32_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_operand(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[1]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static WasmI64Popcnt decode(const uint8_t* stream)
{
        // Pass the constructor a pointer to the first operand (the opcode and any wide prefix are skipped here).
if (*stream == wasm_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == wasm_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide16>(value, func);
else
setDst<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setOperand(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setOperand<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setOperand<OpcodeSize::Wide16>(value, func);
else
setOperand<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setOperand(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
VirtualRegister m_dst;
VirtualRegister m_operand;
};
struct WasmI64Eqz : public Instruction {
static constexpr WasmOpcodeID opcodeID = wasm_i64_eqz;
static constexpr size_t length = 3;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister operand)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, dst, operand);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister operand)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, dst, operand);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister operand)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, dst, operand))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, dst, operand))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, dst, operand);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& dst, VirtualRegister& operand)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<WasmOpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(dst)
&& Fits<VirtualRegister, __size>::check(operand)
&& (__size == OpcodeSize::Wide16 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister operand)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, dst, operand)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide32));
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(dst));
gen->write(Fits<VirtualRegister, __size>::convert(operand));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**i64_eqz"[2 - __sizeShiftAmount]);
dumper->dumpOperand("dst", m_dst, true);
dumper->dumpOperand("operand", m_operand, false);
}
WasmI64Eqz(const uint8_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_operand(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[1]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmI64Eqz(const uint16_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_operand(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[1]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmI64Eqz(const uint32_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_operand(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[1]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static WasmI64Eqz decode(const uint8_t* stream)
{
        // Pass the constructor a pointer to the first operand (the opcode and any wide prefix are skipped here).
if (*stream == wasm_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == wasm_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide16>(value, func);
else
setDst<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setOperand(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setOperand<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setOperand<OpcodeSize::Wide16>(value, func);
else
setOperand<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setOperand(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
VirtualRegister m_dst;
VirtualRegister m_operand;
};
struct WasmF32Add : public Instruction {
static constexpr WasmOpcodeID opcodeID = wasm_f32_add;
static constexpr size_t length = 4;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, dst, lhs, rhs);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, dst, lhs, rhs);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, dst, lhs, rhs))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, dst, lhs, rhs))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, dst, lhs, rhs);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& dst, VirtualRegister& lhs, VirtualRegister& rhs)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<WasmOpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(dst)
&& Fits<VirtualRegister, __size>::check(lhs)
&& Fits<VirtualRegister, __size>::check(rhs)
&& (__size == OpcodeSize::Wide16 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, dst, lhs, rhs)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide32));
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(dst));
gen->write(Fits<VirtualRegister, __size>::convert(lhs));
gen->write(Fits<VirtualRegister, __size>::convert(rhs));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**f32_add"[2 - __sizeShiftAmount]);
dumper->dumpOperand("dst", m_dst, true);
dumper->dumpOperand("lhs", m_lhs, false);
dumper->dumpOperand("rhs", m_rhs, false);
}
WasmF32Add(const uint8_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_lhs(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[1]))
, m_rhs(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmF32Add(const uint16_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_lhs(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[1]))
, m_rhs(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmF32Add(const uint32_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_lhs(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[1]))
, m_rhs(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static WasmF32Add decode(const uint8_t* stream)
{
        // Pass the constructor a pointer to the first operand (the opcode and any wide prefix are skipped here).
if (*stream == wasm_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == wasm_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide16>(value, func);
else
setDst<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setLhs(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setLhs<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setLhs<OpcodeSize::Wide16>(value, func);
else
setLhs<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setLhs(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setRhs(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setRhs<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setRhs<OpcodeSize::Wide16>(value, func);
else
setRhs<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setRhs(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 2 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
VirtualRegister m_dst;
VirtualRegister m_lhs;
VirtualRegister m_rhs;
};
struct WasmF32Sub : public Instruction {
static constexpr WasmOpcodeID opcodeID = wasm_f32_sub;
static constexpr size_t length = 4;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, dst, lhs, rhs);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, dst, lhs, rhs);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, dst, lhs, rhs))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, dst, lhs, rhs))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, dst, lhs, rhs);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& dst, VirtualRegister& lhs, VirtualRegister& rhs)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<WasmOpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(dst)
&& Fits<VirtualRegister, __size>::check(lhs)
&& Fits<VirtualRegister, __size>::check(rhs)
&& (__size == OpcodeSize::Wide16 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, dst, lhs, rhs)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide32));
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(dst));
gen->write(Fits<VirtualRegister, __size>::convert(lhs));
gen->write(Fits<VirtualRegister, __size>::convert(rhs));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**f32_sub"[2 - __sizeShiftAmount]);
dumper->dumpOperand("dst", m_dst, true);
dumper->dumpOperand("lhs", m_lhs, false);
dumper->dumpOperand("rhs", m_rhs, false);
}
WasmF32Sub(const uint8_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_lhs(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[1]))
, m_rhs(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmF32Sub(const uint16_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_lhs(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[1]))
, m_rhs(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmF32Sub(const uint32_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_lhs(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[1]))
, m_rhs(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static WasmF32Sub decode(const uint8_t* stream)
{
        // Pass the constructor a pointer to the first operand (the opcode and any wide prefix are skipped here).
if (*stream == wasm_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == wasm_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide16>(value, func);
else
setDst<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setLhs(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setLhs<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setLhs<OpcodeSize::Wide16>(value, func);
else
setLhs<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setLhs(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setRhs(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setRhs<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setRhs<OpcodeSize::Wide16>(value, func);
else
setRhs<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setRhs(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 2 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
VirtualRegister m_dst;
VirtualRegister m_lhs;
VirtualRegister m_rhs;
};
struct WasmF32Mul : public Instruction {
static constexpr WasmOpcodeID opcodeID = wasm_f32_mul;
static constexpr size_t length = 4;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, dst, lhs, rhs);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, dst, lhs, rhs);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, dst, lhs, rhs))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, dst, lhs, rhs))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, dst, lhs, rhs);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& dst, VirtualRegister& lhs, VirtualRegister& rhs)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<WasmOpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(dst)
&& Fits<VirtualRegister, __size>::check(lhs)
&& Fits<VirtualRegister, __size>::check(rhs)
&& (__size == OpcodeSize::Wide16 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, dst, lhs, rhs)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide32));
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(dst));
gen->write(Fits<VirtualRegister, __size>::convert(lhs));
gen->write(Fits<VirtualRegister, __size>::convert(rhs));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**f32_mul"[2 - __sizeShiftAmount]);
dumper->dumpOperand("dst", m_dst, true);
dumper->dumpOperand("lhs", m_lhs, false);
dumper->dumpOperand("rhs", m_rhs, false);
}
WasmF32Mul(const uint8_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_lhs(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[1]))
, m_rhs(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmF32Mul(const uint16_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_lhs(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[1]))
, m_rhs(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmF32Mul(const uint32_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_lhs(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[1]))
, m_rhs(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static WasmF32Mul decode(const uint8_t* stream)
{
        // Pass the constructor a pointer to the first operand (the opcode and any wide prefix are skipped here).
if (*stream == wasm_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == wasm_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide16>(value, func);
else
setDst<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setLhs(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setLhs<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setLhs<OpcodeSize::Wide16>(value, func);
else
setLhs<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setLhs(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setRhs(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setRhs<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setRhs<OpcodeSize::Wide16>(value, func);
else
setRhs<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setRhs(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 2 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
VirtualRegister m_dst;
VirtualRegister m_lhs;
VirtualRegister m_rhs;
};
struct WasmF32Div : public Instruction {
static constexpr WasmOpcodeID opcodeID = wasm_f32_div;
static constexpr size_t length = 4;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, dst, lhs, rhs);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, dst, lhs, rhs);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, dst, lhs, rhs))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, dst, lhs, rhs))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, dst, lhs, rhs);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& dst, VirtualRegister& lhs, VirtualRegister& rhs)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<WasmOpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(dst)
&& Fits<VirtualRegister, __size>::check(lhs)
&& Fits<VirtualRegister, __size>::check(rhs)
&& (__size == OpcodeSize::Wide16 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, dst, lhs, rhs)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide32));
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(dst));
gen->write(Fits<VirtualRegister, __size>::convert(lhs));
gen->write(Fits<VirtualRegister, __size>::convert(rhs));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**f32_div"[2 - __sizeShiftAmount]);
dumper->dumpOperand("dst", m_dst, true);
dumper->dumpOperand("lhs", m_lhs, false);
dumper->dumpOperand("rhs", m_rhs, false);
}
WasmF32Div(const uint8_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_lhs(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[1]))
, m_rhs(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmF32Div(const uint16_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_lhs(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[1]))
, m_rhs(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmF32Div(const uint32_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_lhs(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[1]))
, m_rhs(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static WasmF32Div decode(const uint8_t* stream)
{
        // decode() receives a pointer to the start of the instruction; the constructors receive a pointer to the first operand (the opcode and any wide prefix are not included).
if (*stream == wasm_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == wasm_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
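    // Decoding sketch (hypothetical `pc` pointing at the first byte of the
    // instruction, i.e. at the wide prefix when one is present):
    //   auto op = WasmF32Div::decode(pc);
    //   // op.m_dst, op.m_lhs, op.m_rhs now hold the decoded operands.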
template<typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide16>(value, func);
else
setDst<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
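    // Each sized setter patches the encoded instruction in place: the operand
    // lives at (wide prefix, if any) + 1 opcode byte + index * operand width,
    // and func() supplies a substitute register when the new value does not
    // fit the instruction's current encoding.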
template<typename Functor>
void setLhs(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setLhs<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setLhs<OpcodeSize::Wide16>(value, func);
else
setLhs<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setLhs(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setRhs(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setRhs<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setRhs<OpcodeSize::Wide16>(value, func);
else
setRhs<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setRhs(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 2 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
VirtualRegister m_dst;
VirtualRegister m_lhs;
VirtualRegister m_rhs;
};
struct WasmF32Min : public Instruction {
static constexpr WasmOpcodeID opcodeID = wasm_f32_min;
static constexpr size_t length = 4;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, dst, lhs, rhs);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, dst, lhs, rhs);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, dst, lhs, rhs))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, dst, lhs, rhs))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, dst, lhs, rhs);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& dst, VirtualRegister& lhs, VirtualRegister& rhs)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
        // FIXME: Wide16 is disabled for the Windows CLoop as a workaround; remove once this is fixed:
        // https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<WasmOpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(dst)
&& Fits<VirtualRegister, __size>::check(lhs)
&& Fits<VirtualRegister, __size>::check(rhs)
&& (__size == OpcodeSize::Wide16 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, dst, lhs, rhs)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide32));
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(dst));
gen->write(Fits<VirtualRegister, __size>::convert(lhs));
gen->write(Fits<VirtualRegister, __size>::convert(rhs));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**f32_min"[2 - __sizeShiftAmount]);
dumper->dumpOperand("dst", m_dst, true);
dumper->dumpOperand("lhs", m_lhs, false);
dumper->dumpOperand("rhs", m_rhs, false);
}
WasmF32Min(const uint8_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_lhs(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[1]))
, m_rhs(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmF32Min(const uint16_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_lhs(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[1]))
, m_rhs(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmF32Min(const uint32_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_lhs(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[1]))
, m_rhs(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static WasmF32Min decode(const uint8_t* stream)
{
        // decode() receives a pointer to the start of the instruction; the constructors receive a pointer to the first operand (the opcode and any wide prefix are not included).
if (*stream == wasm_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == wasm_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide16>(value, func);
else
setDst<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setLhs(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setLhs<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setLhs<OpcodeSize::Wide16>(value, func);
else
setLhs<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setLhs(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setRhs(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setRhs<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setRhs<OpcodeSize::Wide16>(value, func);
else
setRhs<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setRhs(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 2 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
VirtualRegister m_dst;
VirtualRegister m_lhs;
VirtualRegister m_rhs;
};
struct WasmF32Max : public Instruction {
static constexpr WasmOpcodeID opcodeID = wasm_f32_max;
static constexpr size_t length = 4;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, dst, lhs, rhs);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, dst, lhs, rhs);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, dst, lhs, rhs))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, dst, lhs, rhs))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, dst, lhs, rhs);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& dst, VirtualRegister& lhs, VirtualRegister& rhs)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
        // FIXME: Wide16 is disabled for the Windows CLoop as a workaround; remove once this is fixed:
        // https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<WasmOpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(dst)
&& Fits<VirtualRegister, __size>::check(lhs)
&& Fits<VirtualRegister, __size>::check(rhs)
&& (__size == OpcodeSize::Wide16 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, dst, lhs, rhs)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide32));
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(dst));
gen->write(Fits<VirtualRegister, __size>::convert(lhs));
gen->write(Fits<VirtualRegister, __size>::convert(rhs));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**f32_max"[2 - __sizeShiftAmount]);
dumper->dumpOperand("dst", m_dst, true);
dumper->dumpOperand("lhs", m_lhs, false);
dumper->dumpOperand("rhs", m_rhs, false);
}
WasmF32Max(const uint8_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_lhs(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[1]))
, m_rhs(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmF32Max(const uint16_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_lhs(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[1]))
, m_rhs(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmF32Max(const uint32_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_lhs(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[1]))
, m_rhs(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static WasmF32Max decode(const uint8_t* stream)
{
        // decode() receives a pointer to the start of the instruction; the constructors receive a pointer to the first operand (the opcode and any wide prefix are not included).
if (*stream == wasm_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == wasm_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide16>(value, func);
else
setDst<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setLhs(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setLhs<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setLhs<OpcodeSize::Wide16>(value, func);
else
setLhs<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setLhs(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setRhs(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setRhs<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setRhs<OpcodeSize::Wide16>(value, func);
else
setRhs<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setRhs(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 2 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
VirtualRegister m_dst;
VirtualRegister m_lhs;
VirtualRegister m_rhs;
};
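/*
 * The unary ops that follow (f32_abs, f32_neg, f32_ceil, f32_floor, f32_trunc,
 * f32_nearest, f32_sqrt) reuse the same machinery with length 3: an opcode
 * slot followed by dst and a single operand.
 */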
struct WasmF32Abs : public Instruction {
static constexpr WasmOpcodeID opcodeID = wasm_f32_abs;
static constexpr size_t length = 3;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister operand)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, dst, operand);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister operand)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, dst, operand);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister operand)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, dst, operand))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, dst, operand))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, dst, operand);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& dst, VirtualRegister& operand)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
        // FIXME: Wide16 is disabled for the Windows CLoop as a workaround; remove once this is fixed:
        // https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<WasmOpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(dst)
&& Fits<VirtualRegister, __size>::check(operand)
&& (__size == OpcodeSize::Wide16 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister operand)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, dst, operand)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide32));
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(dst));
gen->write(Fits<VirtualRegister, __size>::convert(operand));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**f32_abs"[2 - __sizeShiftAmount]);
dumper->dumpOperand("dst", m_dst, true);
dumper->dumpOperand("operand", m_operand, false);
}
WasmF32Abs(const uint8_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_operand(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[1]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmF32Abs(const uint16_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_operand(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[1]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmF32Abs(const uint32_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_operand(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[1]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static WasmF32Abs decode(const uint8_t* stream)
{
        // decode() receives a pointer to the start of the instruction; the constructors receive a pointer to the first operand (the opcode and any wide prefix are not included).
if (*stream == wasm_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == wasm_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide16>(value, func);
else
setDst<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setOperand(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setOperand<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setOperand<OpcodeSize::Wide16>(value, func);
else
setOperand<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setOperand(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
VirtualRegister m_dst;
VirtualRegister m_operand;
};
struct WasmF32Neg : public Instruction {
static constexpr WasmOpcodeID opcodeID = wasm_f32_neg;
static constexpr size_t length = 3;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister operand)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, dst, operand);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister operand)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, dst, operand);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister operand)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, dst, operand))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, dst, operand))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, dst, operand);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& dst, VirtualRegister& operand)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
        // FIXME: Wide16 is disabled for the Windows CLoop as a workaround; remove once this is fixed:
        // https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<WasmOpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(dst)
&& Fits<VirtualRegister, __size>::check(operand)
&& (__size == OpcodeSize::Wide16 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister operand)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, dst, operand)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide32));
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(dst));
gen->write(Fits<VirtualRegister, __size>::convert(operand));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**f32_neg"[2 - __sizeShiftAmount]);
dumper->dumpOperand("dst", m_dst, true);
dumper->dumpOperand("operand", m_operand, false);
}
WasmF32Neg(const uint8_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_operand(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[1]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmF32Neg(const uint16_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_operand(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[1]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmF32Neg(const uint32_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_operand(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[1]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static WasmF32Neg decode(const uint8_t* stream)
{
        // decode() receives a pointer to the start of the instruction; the constructors receive a pointer to the first operand (the opcode and any wide prefix are not included).
if (*stream == wasm_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == wasm_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide16>(value, func);
else
setDst<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setOperand(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setOperand<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setOperand<OpcodeSize::Wide16>(value, func);
else
setOperand<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setOperand(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
VirtualRegister m_dst;
VirtualRegister m_operand;
};
struct WasmF32Copysign : public Instruction {
static constexpr WasmOpcodeID opcodeID = wasm_f32_copysign;
static constexpr size_t length = 4;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, dst, lhs, rhs);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, dst, lhs, rhs);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, dst, lhs, rhs))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, dst, lhs, rhs))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, dst, lhs, rhs);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& dst, VirtualRegister& lhs, VirtualRegister& rhs)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
        // FIXME: Wide16 is disabled for the Windows CLoop as a workaround; remove once this is fixed:
        // https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<WasmOpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(dst)
&& Fits<VirtualRegister, __size>::check(lhs)
&& Fits<VirtualRegister, __size>::check(rhs)
&& (__size == OpcodeSize::Wide16 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, dst, lhs, rhs)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide32));
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(dst));
gen->write(Fits<VirtualRegister, __size>::convert(lhs));
gen->write(Fits<VirtualRegister, __size>::convert(rhs));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**f32_copysign"[2 - __sizeShiftAmount]);
dumper->dumpOperand("dst", m_dst, true);
dumper->dumpOperand("lhs", m_lhs, false);
dumper->dumpOperand("rhs", m_rhs, false);
}
WasmF32Copysign(const uint8_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_lhs(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[1]))
, m_rhs(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmF32Copysign(const uint16_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_lhs(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[1]))
, m_rhs(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmF32Copysign(const uint32_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_lhs(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[1]))
, m_rhs(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static WasmF32Copysign decode(const uint8_t* stream)
{
        // decode() receives a pointer to the start of the instruction; the constructors receive a pointer to the first operand (the opcode and any wide prefix are not included).
if (*stream == wasm_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == wasm_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide16>(value, func);
else
setDst<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setLhs(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setLhs<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setLhs<OpcodeSize::Wide16>(value, func);
else
setLhs<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setLhs(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setRhs(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setRhs<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setRhs<OpcodeSize::Wide16>(value, func);
else
setRhs<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setRhs(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 2 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
VirtualRegister m_dst;
VirtualRegister m_lhs;
VirtualRegister m_rhs;
};
struct WasmF32Ceil : public Instruction {
static constexpr WasmOpcodeID opcodeID = wasm_f32_ceil;
static constexpr size_t length = 3;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister operand)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, dst, operand);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister operand)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, dst, operand);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister operand)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, dst, operand))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, dst, operand))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, dst, operand);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& dst, VirtualRegister& operand)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
        // FIXME: Wide16 is disabled for the Windows CLoop as a workaround; remove once this is fixed:
        // https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<WasmOpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(dst)
&& Fits<VirtualRegister, __size>::check(operand)
&& (__size == OpcodeSize::Wide16 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister operand)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, dst, operand)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide32));
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(dst));
gen->write(Fits<VirtualRegister, __size>::convert(operand));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**f32_ceil"[2 - __sizeShiftAmount]);
dumper->dumpOperand("dst", m_dst, true);
dumper->dumpOperand("operand", m_operand, false);
}
WasmF32Ceil(const uint8_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_operand(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[1]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmF32Ceil(const uint16_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_operand(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[1]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmF32Ceil(const uint32_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_operand(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[1]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static WasmF32Ceil decode(const uint8_t* stream)
{
        // decode() receives a pointer to the start of the instruction; the constructors receive a pointer to the first operand (the opcode and any wide prefix are not included).
if (*stream == wasm_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == wasm_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide16>(value, func);
else
setDst<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setOperand(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setOperand<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setOperand<OpcodeSize::Wide16>(value, func);
else
setOperand<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setOperand(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
VirtualRegister m_dst;
VirtualRegister m_operand;
};
struct WasmF32Floor : public Instruction {
static constexpr WasmOpcodeID opcodeID = wasm_f32_floor;
static constexpr size_t length = 3;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister operand)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, dst, operand);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister operand)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, dst, operand);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister operand)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, dst, operand))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, dst, operand))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, dst, operand);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& dst, VirtualRegister& operand)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
        // FIXME: Wide16 is disabled for the Windows CLoop as a workaround; remove once this is fixed:
        // https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<WasmOpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(dst)
&& Fits<VirtualRegister, __size>::check(operand)
&& (__size == OpcodeSize::Wide16 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister operand)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, dst, operand)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide32));
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(dst));
gen->write(Fits<VirtualRegister, __size>::convert(operand));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**f32_floor"[2 - __sizeShiftAmount]);
dumper->dumpOperand("dst", m_dst, true);
dumper->dumpOperand("operand", m_operand, false);
}
WasmF32Floor(const uint8_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_operand(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[1]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmF32Floor(const uint16_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_operand(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[1]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmF32Floor(const uint32_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_operand(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[1]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static WasmF32Floor decode(const uint8_t* stream)
{
        // decode() receives a pointer to the start of the instruction; the constructors receive a pointer to the first operand (the opcode and any wide prefix are not included).
if (*stream == wasm_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == wasm_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide16>(value, func);
else
setDst<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setOperand(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setOperand<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setOperand<OpcodeSize::Wide16>(value, func);
else
setOperand<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setOperand(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
VirtualRegister m_dst;
VirtualRegister m_operand;
};
struct WasmF32Trunc : public Instruction {
static constexpr WasmOpcodeID opcodeID = wasm_f32_trunc;
static constexpr size_t length = 3;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister operand)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, dst, operand);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister operand)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, dst, operand);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister operand)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, dst, operand))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, dst, operand))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, dst, operand);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& dst, VirtualRegister& operand)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
        // FIXME: Wide16 is disabled for the Windows CLoop as a workaround; remove once this is fixed:
        // https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<WasmOpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(dst)
&& Fits<VirtualRegister, __size>::check(operand)
&& (__size == OpcodeSize::Wide16 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister operand)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, dst, operand)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide32));
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(dst));
gen->write(Fits<VirtualRegister, __size>::convert(operand));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**f32_trunc"[2 - __sizeShiftAmount]);
dumper->dumpOperand("dst", m_dst, true);
dumper->dumpOperand("operand", m_operand, false);
}
WasmF32Trunc(const uint8_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_operand(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[1]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmF32Trunc(const uint16_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_operand(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[1]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmF32Trunc(const uint32_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_operand(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[1]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static WasmF32Trunc decode(const uint8_t* stream)
{
        // decode() receives a pointer to the start of the instruction; the constructors receive a pointer to the first operand (the opcode and any wide prefix are not included).
if (*stream == wasm_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == wasm_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide16>(value, func);
else
setDst<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setOperand(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setOperand<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setOperand<OpcodeSize::Wide16>(value, func);
else
setOperand<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setOperand(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
VirtualRegister m_dst;
VirtualRegister m_operand;
};
struct WasmF32Nearest : public Instruction {
static constexpr WasmOpcodeID opcodeID = wasm_f32_nearest;
static constexpr size_t length = 3;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister operand)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, dst, operand);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister operand)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, dst, operand);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister operand)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, dst, operand))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, dst, operand))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, dst, operand);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& dst, VirtualRegister& operand)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
        // FIXME: Wide16 is disabled for the Windows CLoop as a workaround; remove once this is fixed:
        // https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<WasmOpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(dst)
&& Fits<VirtualRegister, __size>::check(operand)
&& (__size == OpcodeSize::Wide16 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister operand)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, dst, operand)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide32));
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(dst));
gen->write(Fits<VirtualRegister, __size>::convert(operand));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**f32_nearest"[2 - __sizeShiftAmount]);
dumper->dumpOperand("dst", m_dst, true);
dumper->dumpOperand("operand", m_operand, false);
}
WasmF32Nearest(const uint8_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_operand(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[1]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmF32Nearest(const uint16_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_operand(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[1]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmF32Nearest(const uint32_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_operand(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[1]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static WasmF32Nearest decode(const uint8_t* stream)
{
        // decode() receives a pointer to the start of the instruction; the constructors receive a pointer to the first operand (the opcode and any wide prefix are not included).
if (*stream == wasm_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == wasm_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide16>(value, func);
else
setDst<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setOperand(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setOperand<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setOperand<OpcodeSize::Wide16>(value, func);
else
setOperand<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setOperand(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
VirtualRegister m_dst;
VirtualRegister m_operand;
};
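// Illustrative note, not generator output: the byte layout that decode()
// above consumes. A narrow instruction is the opcode followed by one byte
// per operand, e.g. for f32_nearest:
//
//     [wasm_f32_nearest] [dst] [operand]
//
// so decode() returns { stream + 1 }, handing the constructor a pointer to
// the first operand. Wide encodings prepend a one-byte prefix and widen each
// operand slot (the alignWideOpcode16/32 calls in emitImpl() pad the stream
// beforehand so the wider slots land aligned):
//
//     [wasm_wide16] [wasm_f32_nearest] [dst:16] [operand:16]
//     [wasm_wide32] [wasm_f32_nearest] [dst:32] [operand:32]
//
// decode() detects the prefix via *stream and rebinds the operands as a
// uint16_t or uint32_t stream starting at stream + 2.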
struct WasmF32Sqrt : public Instruction {
static constexpr WasmOpcodeID opcodeID = wasm_f32_sqrt;
static constexpr size_t length = 3;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister operand)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, dst, operand);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister operand)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, dst, operand);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister operand)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, dst, operand))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, dst, operand))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, dst, operand);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& dst, VirtualRegister& operand)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<WasmOpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(dst)
&& Fits<VirtualRegister, __size>::check(operand)
&& (__size == OpcodeSize::Wide16 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister operand)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, dst, operand)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide32));
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(dst));
gen->write(Fits<VirtualRegister, __size>::convert(operand));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**f32_sqrt"[2 - __sizeShiftAmount]);
dumper->dumpOperand("dst", m_dst, true);
dumper->dumpOperand("operand", m_operand, false);
}
WasmF32Sqrt(const uint8_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_operand(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[1]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmF32Sqrt(const uint16_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_operand(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[1]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmF32Sqrt(const uint32_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_operand(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[1]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static WasmF32Sqrt decode(const uint8_t* stream)
{
// The pointers constructed here point at the first operand; the opcode and any wide prefix have already been skipped.
if (*stream == wasm_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == wasm_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide16>(value, func);
else
setDst<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setOperand(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setOperand<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setOperand<OpcodeSize::Wide16>(value, func);
else
setOperand<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setOperand(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
VirtualRegister m_dst;
VirtualRegister m_operand;
};
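// Usage sketch, illustrative only (`gen` is assumed to be any generator
// satisfying the BytecodeGenerator interface these templates require):
//
//     WasmF32Sqrt::emit(&gen, dst, operand);
//
// The untemplated emit() defers to emitWithSmallestSizeRequirement(), which
// attempts Narrow with NoAssert, then Wide16, then Wide32 with Assert. An
// attempt fails when checkImpl() rejects it (a register offset that does not
// fit the encoding width, or any Wide16 attempt under OS(WINDOWS) &&
// ENABLE(C_LOOP)), so the first width whose Fits<> checks all pass wins.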
struct WasmF32Eq : public Instruction {
static constexpr WasmOpcodeID opcodeID = wasm_f32_eq;
static constexpr size_t length = 4;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, dst, lhs, rhs);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, dst, lhs, rhs);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, dst, lhs, rhs))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, dst, lhs, rhs))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, dst, lhs, rhs);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& dst, VirtualRegister& lhs, VirtualRegister& rhs)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<WasmOpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(dst)
&& Fits<VirtualRegister, __size>::check(lhs)
&& Fits<VirtualRegister, __size>::check(rhs)
&& (__size == OpcodeSize::Wide16 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, dst, lhs, rhs)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide32));
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(dst));
gen->write(Fits<VirtualRegister, __size>::convert(lhs));
gen->write(Fits<VirtualRegister, __size>::convert(rhs));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**f32_eq"[2 - __sizeShiftAmount]);
dumper->dumpOperand("dst", m_dst, true);
dumper->dumpOperand("lhs", m_lhs, false);
dumper->dumpOperand("rhs", m_rhs, false);
}
WasmF32Eq(const uint8_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_lhs(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[1]))
, m_rhs(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmF32Eq(const uint16_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_lhs(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[1]))
, m_rhs(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmF32Eq(const uint32_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_lhs(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[1]))
, m_rhs(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static WasmF32Eq decode(const uint8_t* stream)
{
// The pointers constructed here point at the first operand; the opcode and any wide prefix have already been skipped.
if (*stream == wasm_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == wasm_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide16>(value, func);
else
setDst<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setLhs(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setLhs<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setLhs<OpcodeSize::Wide16>(value, func);
else
setLhs<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setLhs(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setRhs(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setRhs<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setRhs<OpcodeSize::Wide16>(value, func);
else
setRhs<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setRhs(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 2 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
VirtualRegister m_dst;
VirtualRegister m_lhs;
VirtualRegister m_rhs;
};
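// Illustrative note on the setDst/setLhs/setRhs templates (the same pattern
// repeats in every struct in this file): they patch an operand of an
// already-emitted instruction in place. The slot address is
//
//     this + operandIndex * size + PaddingBySize<size>::value + 1 /* opcode byte */
//
// i.e. skip the wide-prefix padding and the opcode, then index by operand
// position at the current encoding width. If the new value fails
// Fits<VirtualRegister, size>::check(), the caller-supplied functor is asked
// for a substitute that does fit; the encoding width itself is never
// changed. A hypothetical caller might, for example, pass a functor that
// moves the value into a low-numbered temporary and returns that register.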
struct WasmF32Ne : public Instruction {
static constexpr WasmOpcodeID opcodeID = wasm_f32_ne;
static constexpr size_t length = 4;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, dst, lhs, rhs);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, dst, lhs, rhs);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, dst, lhs, rhs))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, dst, lhs, rhs))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, dst, lhs, rhs);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& dst, VirtualRegister& lhs, VirtualRegister& rhs)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<WasmOpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(dst)
&& Fits<VirtualRegister, __size>::check(lhs)
&& Fits<VirtualRegister, __size>::check(rhs)
&& (__size == OpcodeSize::Wide16 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, dst, lhs, rhs)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide32));
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(dst));
gen->write(Fits<VirtualRegister, __size>::convert(lhs));
gen->write(Fits<VirtualRegister, __size>::convert(rhs));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**f32_ne"[2 - __sizeShiftAmount]);
dumper->dumpOperand("dst", m_dst, true);
dumper->dumpOperand("lhs", m_lhs, false);
dumper->dumpOperand("rhs", m_rhs, false);
}
WasmF32Ne(const uint8_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_lhs(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[1]))
, m_rhs(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmF32Ne(const uint16_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_lhs(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[1]))
, m_rhs(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmF32Ne(const uint32_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_lhs(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[1]))
, m_rhs(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static WasmF32Ne decode(const uint8_t* stream)
{
// The pointers constructed here point at the first operand; the opcode and any wide prefix have already been skipped.
if (*stream == wasm_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == wasm_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide16>(value, func);
else
setDst<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setLhs(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setLhs<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setLhs<OpcodeSize::Wide16>(value, func);
else
setLhs<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setLhs(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setRhs(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setRhs<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setRhs<OpcodeSize::Wide16>(value, func);
else
setRhs<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setRhs(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 2 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
VirtualRegister m_dst;
VirtualRegister m_lhs;
VirtualRegister m_rhs;
};
struct WasmF32Lt : public Instruction {
static constexpr WasmOpcodeID opcodeID = wasm_f32_lt;
static constexpr size_t length = 4;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, dst, lhs, rhs);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, dst, lhs, rhs);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, dst, lhs, rhs))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, dst, lhs, rhs))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, dst, lhs, rhs);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& dst, VirtualRegister& lhs, VirtualRegister& rhs)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<WasmOpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(dst)
&& Fits<VirtualRegister, __size>::check(lhs)
&& Fits<VirtualRegister, __size>::check(rhs)
&& (__size == OpcodeSize::Wide16 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, dst, lhs, rhs)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide32));
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(dst));
gen->write(Fits<VirtualRegister, __size>::convert(lhs));
gen->write(Fits<VirtualRegister, __size>::convert(rhs));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**f32_lt"[2 - __sizeShiftAmount]);
dumper->dumpOperand("dst", m_dst, true);
dumper->dumpOperand("lhs", m_lhs, false);
dumper->dumpOperand("rhs", m_rhs, false);
}
WasmF32Lt(const uint8_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_lhs(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[1]))
, m_rhs(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmF32Lt(const uint16_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_lhs(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[1]))
, m_rhs(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmF32Lt(const uint32_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_lhs(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[1]))
, m_rhs(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static WasmF32Lt decode(const uint8_t* stream)
{
// The pointers constructed here point at the first operand; the opcode and any wide prefix have already been skipped.
if (*stream == wasm_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == wasm_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide16>(value, func);
else
setDst<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setLhs(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setLhs<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setLhs<OpcodeSize::Wide16>(value, func);
else
setLhs<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setLhs(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setRhs(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setRhs<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setRhs<OpcodeSize::Wide16>(value, func);
else
setRhs<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setRhs(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 2 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
VirtualRegister m_dst;
VirtualRegister m_lhs;
VirtualRegister m_rhs;
};
struct WasmF32Le : public Instruction {
static constexpr WasmOpcodeID opcodeID = wasm_f32_le;
static constexpr size_t length = 4;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, dst, lhs, rhs);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, dst, lhs, rhs);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, dst, lhs, rhs))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, dst, lhs, rhs))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, dst, lhs, rhs);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& dst, VirtualRegister& lhs, VirtualRegister& rhs)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<WasmOpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(dst)
&& Fits<VirtualRegister, __size>::check(lhs)
&& Fits<VirtualRegister, __size>::check(rhs)
&& (__size == OpcodeSize::Wide16 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, dst, lhs, rhs)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide32));
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(dst));
gen->write(Fits<VirtualRegister, __size>::convert(lhs));
gen->write(Fits<VirtualRegister, __size>::convert(rhs));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**f32_le"[2 - __sizeShiftAmount]);
dumper->dumpOperand("dst", m_dst, true);
dumper->dumpOperand("lhs", m_lhs, false);
dumper->dumpOperand("rhs", m_rhs, false);
}
WasmF32Le(const uint8_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_lhs(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[1]))
, m_rhs(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmF32Le(const uint16_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_lhs(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[1]))
, m_rhs(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmF32Le(const uint32_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_lhs(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[1]))
, m_rhs(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static WasmF32Le decode(const uint8_t* stream)
{
// The pointers constructed here point at the first operand; the opcode and any wide prefix have already been skipped.
if (*stream == wasm_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == wasm_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide16>(value, func);
else
setDst<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setLhs(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setLhs<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setLhs<OpcodeSize::Wide16>(value, func);
else
setLhs<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setLhs(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setRhs(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setRhs<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setRhs<OpcodeSize::Wide16>(value, func);
else
setRhs<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setRhs(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 2 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
VirtualRegister m_dst;
VirtualRegister m_lhs;
VirtualRegister m_rhs;
};
struct WasmF32Gt : public Instruction {
static constexpr WasmOpcodeID opcodeID = wasm_f32_gt;
static constexpr size_t length = 4;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, dst, lhs, rhs);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, dst, lhs, rhs);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, dst, lhs, rhs))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, dst, lhs, rhs))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, dst, lhs, rhs);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& dst, VirtualRegister& lhs, VirtualRegister& rhs)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<WasmOpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(dst)
&& Fits<VirtualRegister, __size>::check(lhs)
&& Fits<VirtualRegister, __size>::check(rhs)
&& (__size == OpcodeSize::Wide16 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, dst, lhs, rhs)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide32));
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(dst));
gen->write(Fits<VirtualRegister, __size>::convert(lhs));
gen->write(Fits<VirtualRegister, __size>::convert(rhs));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**f32_gt"[2 - __sizeShiftAmount]);
dumper->dumpOperand("dst", m_dst, true);
dumper->dumpOperand("lhs", m_lhs, false);
dumper->dumpOperand("rhs", m_rhs, false);
}
WasmF32Gt(const uint8_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_lhs(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[1]))
, m_rhs(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmF32Gt(const uint16_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_lhs(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[1]))
, m_rhs(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmF32Gt(const uint32_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_lhs(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[1]))
, m_rhs(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static WasmF32Gt decode(const uint8_t* stream)
{
// The pointers constructed here point at the first operand; the opcode and any wide prefix have already been skipped.
if (*stream == wasm_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == wasm_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide16>(value, func);
else
setDst<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setLhs(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setLhs<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setLhs<OpcodeSize::Wide16>(value, func);
else
setLhs<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setLhs(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setRhs(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setRhs<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setRhs<OpcodeSize::Wide16>(value, func);
else
setRhs<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setRhs(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 2 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
VirtualRegister m_dst;
VirtualRegister m_lhs;
VirtualRegister m_rhs;
};
struct WasmF32Ge : public Instruction {
static constexpr WasmOpcodeID opcodeID = wasm_f32_ge;
static constexpr size_t length = 4;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, dst, lhs, rhs);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, dst, lhs, rhs);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, dst, lhs, rhs))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, dst, lhs, rhs))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, dst, lhs, rhs);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& dst, VirtualRegister& lhs, VirtualRegister& rhs)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<WasmOpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(dst)
&& Fits<VirtualRegister, __size>::check(lhs)
&& Fits<VirtualRegister, __size>::check(rhs)
&& (__size == OpcodeSize::Wide16 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, dst, lhs, rhs)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide32));
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(dst));
gen->write(Fits<VirtualRegister, __size>::convert(lhs));
gen->write(Fits<VirtualRegister, __size>::convert(rhs));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**f32_ge"[2 - __sizeShiftAmount]);
dumper->dumpOperand("dst", m_dst, true);
dumper->dumpOperand("lhs", m_lhs, false);
dumper->dumpOperand("rhs", m_rhs, false);
}
WasmF32Ge(const uint8_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_lhs(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[1]))
, m_rhs(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmF32Ge(const uint16_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_lhs(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[1]))
, m_rhs(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmF32Ge(const uint32_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_lhs(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[1]))
, m_rhs(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static WasmF32Ge decode(const uint8_t* stream)
{
// The pointers constructed here point at the first operand; the opcode and any wide prefix have already been skipped.
if (*stream == wasm_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == wasm_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide16>(value, func);
else
setDst<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setLhs(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setLhs<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setLhs<OpcodeSize::Wide16>(value, func);
else
setLhs<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setLhs(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setRhs(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setRhs<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setRhs<OpcodeSize::Wide16>(value, func);
else
setRhs<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setRhs(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 2 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
VirtualRegister m_dst;
VirtualRegister m_lhs;
VirtualRegister m_rhs;
};
struct WasmF64Add : public Instruction {
static constexpr WasmOpcodeID opcodeID = wasm_f64_add;
static constexpr size_t length = 4;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, dst, lhs, rhs);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, dst, lhs, rhs);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, dst, lhs, rhs))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, dst, lhs, rhs))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, dst, lhs, rhs);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& dst, VirtualRegister& lhs, VirtualRegister& rhs)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<WasmOpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(dst)
&& Fits<VirtualRegister, __size>::check(lhs)
&& Fits<VirtualRegister, __size>::check(rhs)
&& (__size == OpcodeSize::Wide16 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, dst, lhs, rhs)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide32));
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(dst));
gen->write(Fits<VirtualRegister, __size>::convert(lhs));
gen->write(Fits<VirtualRegister, __size>::convert(rhs));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**f64_add"[2 - __sizeShiftAmount]);
dumper->dumpOperand("dst", m_dst, true);
dumper->dumpOperand("lhs", m_lhs, false);
dumper->dumpOperand("rhs", m_rhs, false);
}
WasmF64Add(const uint8_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_lhs(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[1]))
, m_rhs(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmF64Add(const uint16_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_lhs(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[1]))
, m_rhs(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmF64Add(const uint32_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_lhs(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[1]))
, m_rhs(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static WasmF64Add decode(const uint8_t* stream)
{
// The pointers constructed here point at the first operand; the opcode and any wide prefix have already been skipped.
if (*stream == wasm_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == wasm_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide16>(value, func);
else
setDst<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setLhs(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setLhs<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setLhs<OpcodeSize::Wide16>(value, func);
else
setLhs<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setLhs(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setRhs(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setRhs<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setRhs<OpcodeSize::Wide16>(value, func);
else
setRhs<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setRhs(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 2 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
VirtualRegister m_dst;
VirtualRegister m_lhs;
VirtualRegister m_rhs;
};
struct WasmF64Sub : public Instruction {
static constexpr WasmOpcodeID opcodeID = wasm_f64_sub;
static constexpr size_t length = 4;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, dst, lhs, rhs);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, dst, lhs, rhs);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, dst, lhs, rhs))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, dst, lhs, rhs))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, dst, lhs, rhs);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& dst, VirtualRegister& lhs, VirtualRegister& rhs)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<WasmOpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(dst)
&& Fits<VirtualRegister, __size>::check(lhs)
&& Fits<VirtualRegister, __size>::check(rhs)
&& (__size == OpcodeSize::Wide16 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, dst, lhs, rhs)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide32));
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(dst));
gen->write(Fits<VirtualRegister, __size>::convert(lhs));
gen->write(Fits<VirtualRegister, __size>::convert(rhs));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**f64_sub"[2 - __sizeShiftAmount]);
dumper->dumpOperand("dst", m_dst, true);
dumper->dumpOperand("lhs", m_lhs, false);
dumper->dumpOperand("rhs", m_rhs, false);
}
WasmF64Sub(const uint8_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_lhs(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[1]))
, m_rhs(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmF64Sub(const uint16_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_lhs(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[1]))
, m_rhs(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmF64Sub(const uint32_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_lhs(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[1]))
, m_rhs(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static WasmF64Sub decode(const uint8_t* stream)
{
        // `stream` points at the wide prefix or opcode byte; the constructors
        // receive a pointer to the first operand (prefix and opcode skipped).
if (*stream == wasm_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == wasm_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide16>(value, func);
else
setDst<OpcodeSize::Narrow>(value, func);
}
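    // The sized setters patch an operand in place. The operand's address is the
    // instruction start, plus PaddingBySize (the wide-prefix byte, zero for the
    // narrow encoding), plus one opcode byte, plus operand index times operand
    // width; the functor supplies a replacement register if the new value does
    // not fit the instruction's current encoding.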
template <OpcodeSize size, typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setLhs(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setLhs<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setLhs<OpcodeSize::Wide16>(value, func);
else
setLhs<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setLhs(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setRhs(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setRhs<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setRhs<OpcodeSize::Wide16>(value, func);
else
setRhs<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setRhs(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 2 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
VirtualRegister m_dst;
VirtualRegister m_lhs;
VirtualRegister m_rhs;
};
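// A minimal usage sketch for these structs (hypothetical caller: `generator`
// stands for any BytecodeGenerator-like object providing the write/
// recordOpcode/alignWideOpcode hooks used above, `instructionStart` for a
// pointer to the instruction's first byte, and dst/lhs/rhs for caller-owned
// VirtualRegisters):
//
//     WasmF64Sub::emit(&generator, dst, lhs, rhs); // narrowest encoding that fits
//     WasmF64Sub op = WasmF64Sub::decode(instructionStart);
//     VirtualRegister sourceLhs = op.m_lhs;        // operands, already widened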
struct WasmF64Mul : public Instruction {
static constexpr WasmOpcodeID opcodeID = wasm_f64_mul;
static constexpr size_t length = 4;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, dst, lhs, rhs);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, dst, lhs, rhs);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, dst, lhs, rhs))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, dst, lhs, rhs))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, dst, lhs, rhs);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& dst, VirtualRegister& lhs, VirtualRegister& rhs)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
        // FIXME: Wide16 is currently disabled for the Windows CLoop.
        // https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<WasmOpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(dst)
&& Fits<VirtualRegister, __size>::check(lhs)
&& Fits<VirtualRegister, __size>::check(rhs)
&& (__size == OpcodeSize::Wide16 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, dst, lhs, rhs)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide32));
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(dst));
gen->write(Fits<VirtualRegister, __size>::convert(lhs));
gen->write(Fits<VirtualRegister, __size>::convert(rhs));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**f64_mul"[2 - __sizeShiftAmount]);
dumper->dumpOperand("dst", m_dst, true);
dumper->dumpOperand("lhs", m_lhs, false);
dumper->dumpOperand("rhs", m_rhs, false);
}
WasmF64Mul(const uint8_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_lhs(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[1]))
, m_rhs(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmF64Mul(const uint16_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_lhs(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[1]))
, m_rhs(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmF64Mul(const uint32_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_lhs(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[1]))
, m_rhs(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static WasmF64Mul decode(const uint8_t* stream)
{
        // `stream` points at the wide prefix or opcode byte; the constructors
        // receive a pointer to the first operand (prefix and opcode skipped).
if (*stream == wasm_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == wasm_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide16>(value, func);
else
setDst<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setLhs(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setLhs<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setLhs<OpcodeSize::Wide16>(value, func);
else
setLhs<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setLhs(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setRhs(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setRhs<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setRhs<OpcodeSize::Wide16>(value, func);
else
setRhs<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setRhs(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 2 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
VirtualRegister m_dst;
VirtualRegister m_lhs;
VirtualRegister m_rhs;
};
struct WasmF64Div : public Instruction {
static constexpr WasmOpcodeID opcodeID = wasm_f64_div;
static constexpr size_t length = 4;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, dst, lhs, rhs);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, dst, lhs, rhs);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, dst, lhs, rhs))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, dst, lhs, rhs))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, dst, lhs, rhs);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& dst, VirtualRegister& lhs, VirtualRegister& rhs)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
        // FIXME: Wide16 is currently disabled for the Windows CLoop.
        // https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<WasmOpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(dst)
&& Fits<VirtualRegister, __size>::check(lhs)
&& Fits<VirtualRegister, __size>::check(rhs)
&& (__size == OpcodeSize::Wide16 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, dst, lhs, rhs)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide32));
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(dst));
gen->write(Fits<VirtualRegister, __size>::convert(lhs));
gen->write(Fits<VirtualRegister, __size>::convert(rhs));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**f64_div"[2 - __sizeShiftAmount]);
dumper->dumpOperand("dst", m_dst, true);
dumper->dumpOperand("lhs", m_lhs, false);
dumper->dumpOperand("rhs", m_rhs, false);
}
WasmF64Div(const uint8_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_lhs(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[1]))
, m_rhs(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmF64Div(const uint16_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_lhs(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[1]))
, m_rhs(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmF64Div(const uint32_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_lhs(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[1]))
, m_rhs(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static WasmF64Div decode(const uint8_t* stream)
{
        // `stream` points at the wide prefix or opcode byte; the constructors
        // receive a pointer to the first operand (prefix and opcode skipped).
if (*stream == wasm_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == wasm_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide16>(value, func);
else
setDst<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setLhs(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setLhs<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setLhs<OpcodeSize::Wide16>(value, func);
else
setLhs<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setLhs(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setRhs(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setRhs<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setRhs<OpcodeSize::Wide16>(value, func);
else
setRhs<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setRhs(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 2 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
VirtualRegister m_dst;
VirtualRegister m_lhs;
VirtualRegister m_rhs;
};
struct WasmF64Min : public Instruction {
static constexpr WasmOpcodeID opcodeID = wasm_f64_min;
static constexpr size_t length = 4;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, dst, lhs, rhs);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, dst, lhs, rhs);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, dst, lhs, rhs))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, dst, lhs, rhs))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, dst, lhs, rhs);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& dst, VirtualRegister& lhs, VirtualRegister& rhs)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
        // FIXME: Wide16 is currently disabled for the Windows CLoop.
        // https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<WasmOpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(dst)
&& Fits<VirtualRegister, __size>::check(lhs)
&& Fits<VirtualRegister, __size>::check(rhs)
&& (__size == OpcodeSize::Wide16 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, dst, lhs, rhs)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide32));
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(dst));
gen->write(Fits<VirtualRegister, __size>::convert(lhs));
gen->write(Fits<VirtualRegister, __size>::convert(rhs));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**f64_min"[2 - __sizeShiftAmount]);
dumper->dumpOperand("dst", m_dst, true);
dumper->dumpOperand("lhs", m_lhs, false);
dumper->dumpOperand("rhs", m_rhs, false);
}
WasmF64Min(const uint8_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_lhs(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[1]))
, m_rhs(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmF64Min(const uint16_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_lhs(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[1]))
, m_rhs(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmF64Min(const uint32_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_lhs(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[1]))
, m_rhs(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static WasmF64Min decode(const uint8_t* stream)
{
        // `stream` points at the wide prefix or opcode byte; the constructors
        // receive a pointer to the first operand (prefix and opcode skipped).
if (*stream == wasm_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == wasm_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide16>(value, func);
else
setDst<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setLhs(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setLhs<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setLhs<OpcodeSize::Wide16>(value, func);
else
setLhs<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setLhs(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setRhs(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setRhs<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setRhs<OpcodeSize::Wide16>(value, func);
else
setRhs<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setRhs(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 2 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
VirtualRegister m_dst;
VirtualRegister m_lhs;
VirtualRegister m_rhs;
};
struct WasmF64Max : public Instruction {
static constexpr WasmOpcodeID opcodeID = wasm_f64_max;
static constexpr size_t length = 4;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, dst, lhs, rhs);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, dst, lhs, rhs);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, dst, lhs, rhs))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, dst, lhs, rhs))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, dst, lhs, rhs);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& dst, VirtualRegister& lhs, VirtualRegister& rhs)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
        // FIXME: Wide16 is currently disabled for the Windows CLoop.
        // https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<WasmOpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(dst)
&& Fits<VirtualRegister, __size>::check(lhs)
&& Fits<VirtualRegister, __size>::check(rhs)
&& (__size == OpcodeSize::Wide16 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, dst, lhs, rhs)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide32));
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(dst));
gen->write(Fits<VirtualRegister, __size>::convert(lhs));
gen->write(Fits<VirtualRegister, __size>::convert(rhs));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**f64_max"[2 - __sizeShiftAmount]);
dumper->dumpOperand("dst", m_dst, true);
dumper->dumpOperand("lhs", m_lhs, false);
dumper->dumpOperand("rhs", m_rhs, false);
}
WasmF64Max(const uint8_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_lhs(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[1]))
, m_rhs(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmF64Max(const uint16_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_lhs(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[1]))
, m_rhs(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmF64Max(const uint32_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_lhs(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[1]))
, m_rhs(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static WasmF64Max decode(const uint8_t* stream)
{
        // `stream` points at the wide prefix or opcode byte; the constructors
        // receive a pointer to the first operand (prefix and opcode skipped).
if (*stream == wasm_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == wasm_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide16>(value, func);
else
setDst<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setLhs(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setLhs<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setLhs<OpcodeSize::Wide16>(value, func);
else
setLhs<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setLhs(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setRhs(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setRhs<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setRhs<OpcodeSize::Wide16>(value, func);
else
setRhs<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setRhs(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 2 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
VirtualRegister m_dst;
VirtualRegister m_lhs;
VirtualRegister m_rhs;
};
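// Unary f64 ops (abs, neg, ceil, floor, trunc) use the same machinery with a
// single source operand: `length` drops to 3 and the lhs/rhs pair becomes one
// `operand` field with a matching setter; binary ops such as copysign keep the
// three-operand shape.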
struct WasmF64Abs : public Instruction {
static constexpr WasmOpcodeID opcodeID = wasm_f64_abs;
static constexpr size_t length = 3;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister operand)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, dst, operand);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister operand)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, dst, operand);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister operand)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, dst, operand))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, dst, operand))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, dst, operand);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& dst, VirtualRegister& operand)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
        // FIXME: Wide16 is currently disabled for the Windows CLoop.
        // https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<WasmOpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(dst)
&& Fits<VirtualRegister, __size>::check(operand)
&& (__size == OpcodeSize::Wide16 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister operand)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, dst, operand)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide32));
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(dst));
gen->write(Fits<VirtualRegister, __size>::convert(operand));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**f64_abs"[2 - __sizeShiftAmount]);
dumper->dumpOperand("dst", m_dst, true);
dumper->dumpOperand("operand", m_operand, false);
}
WasmF64Abs(const uint8_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_operand(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[1]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmF64Abs(const uint16_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_operand(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[1]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmF64Abs(const uint32_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_operand(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[1]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static WasmF64Abs decode(const uint8_t* stream)
{
        // `stream` points at the wide prefix or opcode byte; the constructors
        // receive a pointer to the first operand (prefix and opcode skipped).
if (*stream == wasm_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == wasm_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide16>(value, func);
else
setDst<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setOperand(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setOperand<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setOperand<OpcodeSize::Wide16>(value, func);
else
setOperand<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setOperand(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
VirtualRegister m_dst;
VirtualRegister m_operand;
};
struct WasmF64Neg : public Instruction {
static constexpr WasmOpcodeID opcodeID = wasm_f64_neg;
static constexpr size_t length = 3;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister operand)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, dst, operand);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister operand)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, dst, operand);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister operand)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, dst, operand))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, dst, operand))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, dst, operand);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& dst, VirtualRegister& operand)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
        // FIXME: Wide16 is currently disabled for the Windows CLoop.
        // https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<WasmOpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(dst)
&& Fits<VirtualRegister, __size>::check(operand)
&& (__size == OpcodeSize::Wide16 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister operand)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, dst, operand)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide32));
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(dst));
gen->write(Fits<VirtualRegister, __size>::convert(operand));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**f64_neg"[2 - __sizeShiftAmount]);
dumper->dumpOperand("dst", m_dst, true);
dumper->dumpOperand("operand", m_operand, false);
}
WasmF64Neg(const uint8_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_operand(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[1]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmF64Neg(const uint16_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_operand(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[1]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmF64Neg(const uint32_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_operand(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[1]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static WasmF64Neg decode(const uint8_t* stream)
{
        // `stream` points at the wide prefix or opcode byte; the constructors
        // receive a pointer to the first operand (prefix and opcode skipped).
if (*stream == wasm_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == wasm_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide16>(value, func);
else
setDst<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setOperand(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setOperand<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setOperand<OpcodeSize::Wide16>(value, func);
else
setOperand<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setOperand(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
VirtualRegister m_dst;
VirtualRegister m_operand;
};
struct WasmF64Copysign : public Instruction {
static constexpr WasmOpcodeID opcodeID = wasm_f64_copysign;
static constexpr size_t length = 4;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, dst, lhs, rhs);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, dst, lhs, rhs);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, dst, lhs, rhs))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, dst, lhs, rhs))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, dst, lhs, rhs);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& dst, VirtualRegister& lhs, VirtualRegister& rhs)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
        // FIXME: Wide16 is currently disabled for the Windows CLoop.
        // https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<WasmOpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(dst)
&& Fits<VirtualRegister, __size>::check(lhs)
&& Fits<VirtualRegister, __size>::check(rhs)
&& (__size == OpcodeSize::Wide16 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, dst, lhs, rhs)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide32));
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(dst));
gen->write(Fits<VirtualRegister, __size>::convert(lhs));
gen->write(Fits<VirtualRegister, __size>::convert(rhs));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**f64_copysign"[2 - __sizeShiftAmount]);
dumper->dumpOperand("dst", m_dst, true);
dumper->dumpOperand("lhs", m_lhs, false);
dumper->dumpOperand("rhs", m_rhs, false);
}
WasmF64Copysign(const uint8_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_lhs(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[1]))
, m_rhs(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmF64Copysign(const uint16_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_lhs(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[1]))
, m_rhs(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmF64Copysign(const uint32_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_lhs(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[1]))
, m_rhs(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static WasmF64Copysign decode(const uint8_t* stream)
{
// A pointer is pointing to the first operand (opcode and prefix are not included).
if (*stream == wasm_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == wasm_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide16>(value, func);
else
setDst<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setLhs(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setLhs<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setLhs<OpcodeSize::Wide16>(value, func);
else
setLhs<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setLhs(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setRhs(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setRhs<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setRhs<OpcodeSize::Wide16>(value, func);
else
setRhs<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setRhs(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 2 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
VirtualRegister m_dst;
VirtualRegister m_lhs;
VirtualRegister m_rhs;
};
struct WasmF64Ceil : public Instruction {
static constexpr WasmOpcodeID opcodeID = wasm_f64_ceil;
static constexpr size_t length = 3;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister operand)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, dst, operand);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister operand)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, dst, operand);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister operand)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, dst, operand))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, dst, operand))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, dst, operand);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& dst, VirtualRegister& operand)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
        // FIXME: Wide16 is currently disabled for the Windows CLoop.
        // https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<WasmOpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(dst)
&& Fits<VirtualRegister, __size>::check(operand)
&& (__size == OpcodeSize::Wide16 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister operand)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, dst, operand)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide32));
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(dst));
gen->write(Fits<VirtualRegister, __size>::convert(operand));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**f64_ceil"[2 - __sizeShiftAmount]);
dumper->dumpOperand("dst", m_dst, true);
dumper->dumpOperand("operand", m_operand, false);
}
WasmF64Ceil(const uint8_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_operand(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[1]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmF64Ceil(const uint16_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_operand(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[1]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmF64Ceil(const uint32_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_operand(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[1]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static WasmF64Ceil decode(const uint8_t* stream)
{
        // `stream` points at the wide prefix or opcode byte; the constructors
        // receive a pointer to the first operand (prefix and opcode skipped).
if (*stream == wasm_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == wasm_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide16>(value, func);
else
setDst<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setOperand(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setOperand<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setOperand<OpcodeSize::Wide16>(value, func);
else
setOperand<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setOperand(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
VirtualRegister m_dst;
VirtualRegister m_operand;
};
struct WasmF64Floor : public Instruction {
static constexpr WasmOpcodeID opcodeID = wasm_f64_floor;
static constexpr size_t length = 3;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister operand)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, dst, operand);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister operand)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, dst, operand);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister operand)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, dst, operand))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, dst, operand))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, dst, operand);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& dst, VirtualRegister& operand)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
        // FIXME: Wide16 is currently disabled for the Windows CLoop.
        // https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<WasmOpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(dst)
&& Fits<VirtualRegister, __size>::check(operand)
&& (__size == OpcodeSize::Wide16 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister operand)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, dst, operand)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide32));
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(dst));
gen->write(Fits<VirtualRegister, __size>::convert(operand));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**f64_floor"[2 - __sizeShiftAmount]);
dumper->dumpOperand("dst", m_dst, true);
dumper->dumpOperand("operand", m_operand, false);
}
WasmF64Floor(const uint8_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_operand(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[1]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmF64Floor(const uint16_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_operand(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[1]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmF64Floor(const uint32_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_operand(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[1]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static WasmF64Floor decode(const uint8_t* stream)
{
// `stream` points at the start of the instruction; the wide prefix (if any) and the opcode byte are skipped below, so the constructor receives a pointer to the first operand.
if (*stream == wasm_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == wasm_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide16>(value, func);
else
setDst<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setOperand(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setOperand<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setOperand<OpcodeSize::Wide16>(value, func);
else
setOperand<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setOperand(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
VirtualRegister m_dst;
VirtualRegister m_operand;
};
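// wasm_f64_trunc: dst = trunc(operand); rounds an f64 toward zero, discarding the fractional part.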
struct WasmF64Trunc : public Instruction {
static constexpr WasmOpcodeID opcodeID = wasm_f64_trunc;
static constexpr size_t length = 3;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister operand)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, dst, operand);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister operand)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, dst, operand);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister operand)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, dst, operand))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, dst, operand))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, dst, operand);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& dst, VirtualRegister& operand)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<WasmOpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(dst)
&& Fits<VirtualRegister, __size>::check(operand)
&& (__size == OpcodeSize::Wide16 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister operand)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, dst, operand)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide32));
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(dst));
gen->write(Fits<VirtualRegister, __size>::convert(operand));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**f64_trunc"[2 - __sizeShiftAmount]);
dumper->dumpOperand("dst", m_dst, true);
dumper->dumpOperand("operand", m_operand, false);
}
WasmF64Trunc(const uint8_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_operand(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[1]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmF64Trunc(const uint16_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_operand(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[1]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmF64Trunc(const uint32_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_operand(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[1]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static WasmF64Trunc decode(const uint8_t* stream)
{
// `stream` points at the start of the instruction; the wide prefix (if any) and the opcode byte are skipped below, so the constructor receives a pointer to the first operand.
if (*stream == wasm_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == wasm_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide16>(value, func);
else
setDst<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setOperand(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setOperand<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setOperand<OpcodeSize::Wide16>(value, func);
else
setOperand<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setOperand(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
VirtualRegister m_dst;
VirtualRegister m_operand;
};
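// wasm_f64_nearest: dst = nearest(operand); rounds an f64 to the nearest integer, breaking ties toward even (IEEE 754 roundTiesToEven).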
struct WasmF64Nearest : public Instruction {
static constexpr WasmOpcodeID opcodeID = wasm_f64_nearest;
static constexpr size_t length = 3;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister operand)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, dst, operand);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister operand)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, dst, operand);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister operand)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, dst, operand))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, dst, operand))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, dst, operand);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& dst, VirtualRegister& operand)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<WasmOpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(dst)
&& Fits<VirtualRegister, __size>::check(operand)
&& (__size == OpcodeSize::Wide16 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister operand)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, dst, operand)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide32));
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(dst));
gen->write(Fits<VirtualRegister, __size>::convert(operand));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**f64_nearest"[2 - __sizeShiftAmount]);
dumper->dumpOperand("dst", m_dst, true);
dumper->dumpOperand("operand", m_operand, false);
}
WasmF64Nearest(const uint8_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_operand(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[1]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmF64Nearest(const uint16_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_operand(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[1]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmF64Nearest(const uint32_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_operand(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[1]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static WasmF64Nearest decode(const uint8_t* stream)
{
// `stream` points at the start of the instruction; the wide prefix (if any) and the opcode byte are skipped below, so the constructor receives a pointer to the first operand.
if (*stream == wasm_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == wasm_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide16>(value, func);
else
setDst<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setOperand(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setOperand<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setOperand<OpcodeSize::Wide16>(value, func);
else
setOperand<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setOperand(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
VirtualRegister m_dst;
VirtualRegister m_operand;
};
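// wasm_f64_sqrt: dst = sqrt(operand); IEEE 754 square root of an f64.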
struct WasmF64Sqrt : public Instruction {
static constexpr WasmOpcodeID opcodeID = wasm_f64_sqrt;
static constexpr size_t length = 3;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister operand)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, dst, operand);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister operand)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, dst, operand);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister operand)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, dst, operand))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, dst, operand))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, dst, operand);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& dst, VirtualRegister& operand)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<WasmOpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(dst)
&& Fits<VirtualRegister, __size>::check(operand)
&& (__size == OpcodeSize::Wide16 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister operand)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, dst, operand)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide32));
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(dst));
gen->write(Fits<VirtualRegister, __size>::convert(operand));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**f64_sqrt"[2 - __sizeShiftAmount]);
dumper->dumpOperand("dst", m_dst, true);
dumper->dumpOperand("operand", m_operand, false);
}
WasmF64Sqrt(const uint8_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_operand(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[1]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmF64Sqrt(const uint16_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_operand(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[1]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmF64Sqrt(const uint32_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_operand(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[1]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static WasmF64Sqrt decode(const uint8_t* stream)
{
// `stream` points at the start of the instruction; the wide prefix (if any) and the opcode byte are skipped below, so the constructor receives a pointer to the first operand.
if (*stream == wasm_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == wasm_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide16>(value, func);
else
setDst<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setOperand(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setOperand<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setOperand<OpcodeSize::Wide16>(value, func);
else
setOperand<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setOperand(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
VirtualRegister m_dst;
VirtualRegister m_operand;
};
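// wasm_f64_eq: dst = (lhs == rhs) as an i32 (0 or 1); a NaN operand compares unequal.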
struct WasmF64Eq : public Instruction {
static constexpr WasmOpcodeID opcodeID = wasm_f64_eq;
static constexpr size_t length = 4;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, dst, lhs, rhs);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, dst, lhs, rhs);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, dst, lhs, rhs))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, dst, lhs, rhs))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, dst, lhs, rhs);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& dst, VirtualRegister& lhs, VirtualRegister& rhs)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<WasmOpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(dst)
&& Fits<VirtualRegister, __size>::check(lhs)
&& Fits<VirtualRegister, __size>::check(rhs)
&& (__size == OpcodeSize::Wide16 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, dst, lhs, rhs)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide32));
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(dst));
gen->write(Fits<VirtualRegister, __size>::convert(lhs));
gen->write(Fits<VirtualRegister, __size>::convert(rhs));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**f64_eq"[2 - __sizeShiftAmount]);
dumper->dumpOperand("dst", m_dst, true);
dumper->dumpOperand("lhs", m_lhs, false);
dumper->dumpOperand("rhs", m_rhs, false);
}
WasmF64Eq(const uint8_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_lhs(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[1]))
, m_rhs(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmF64Eq(const uint16_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_lhs(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[1]))
, m_rhs(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmF64Eq(const uint32_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_lhs(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[1]))
, m_rhs(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static WasmF64Eq decode(const uint8_t* stream)
{
// `stream` points at the start of the instruction; the wide prefix (if any) and the opcode byte are skipped below, so the constructor receives a pointer to the first operand.
if (*stream == wasm_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == wasm_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide16>(value, func);
else
setDst<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setLhs(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setLhs<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setLhs<OpcodeSize::Wide16>(value, func);
else
setLhs<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setLhs(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setRhs(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setRhs<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setRhs<OpcodeSize::Wide16>(value, func);
else
setRhs<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setRhs(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 2 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
VirtualRegister m_dst;
VirtualRegister m_lhs;
VirtualRegister m_rhs;
};
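// wasm_f64_ne: dst = (lhs != rhs) as an i32; true whenever either operand is NaN.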
struct WasmF64Ne : public Instruction {
static constexpr WasmOpcodeID opcodeID = wasm_f64_ne;
static constexpr size_t length = 4;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, dst, lhs, rhs);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, dst, lhs, rhs);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, dst, lhs, rhs))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, dst, lhs, rhs))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, dst, lhs, rhs);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& dst, VirtualRegister& lhs, VirtualRegister& rhs)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<WasmOpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(dst)
&& Fits<VirtualRegister, __size>::check(lhs)
&& Fits<VirtualRegister, __size>::check(rhs)
&& (__size == OpcodeSize::Wide16 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, dst, lhs, rhs)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide32));
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(dst));
gen->write(Fits<VirtualRegister, __size>::convert(lhs));
gen->write(Fits<VirtualRegister, __size>::convert(rhs));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**f64_ne"[2 - __sizeShiftAmount]);
dumper->dumpOperand("dst", m_dst, true);
dumper->dumpOperand("lhs", m_lhs, false);
dumper->dumpOperand("rhs", m_rhs, false);
}
WasmF64Ne(const uint8_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_lhs(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[1]))
, m_rhs(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmF64Ne(const uint16_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_lhs(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[1]))
, m_rhs(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmF64Ne(const uint32_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_lhs(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[1]))
, m_rhs(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static WasmF64Ne decode(const uint8_t* stream)
{
// `stream` points at the start of the instruction; the wide prefix (if any) and the opcode byte are skipped below, so the constructor receives a pointer to the first operand.
if (*stream == wasm_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == wasm_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide16>(value, func);
else
setDst<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setLhs(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setLhs<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setLhs<OpcodeSize::Wide16>(value, func);
else
setLhs<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setLhs(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setRhs(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setRhs<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setRhs<OpcodeSize::Wide16>(value, func);
else
setRhs<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setRhs(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 2 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
VirtualRegister m_dst;
VirtualRegister m_lhs;
VirtualRegister m_rhs;
};
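// wasm_f64_lt: dst = (lhs < rhs) as an i32; ordered comparison, false if either operand is NaN.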
struct WasmF64Lt : public Instruction {
static constexpr WasmOpcodeID opcodeID = wasm_f64_lt;
static constexpr size_t length = 4;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, dst, lhs, rhs);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, dst, lhs, rhs);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, dst, lhs, rhs))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, dst, lhs, rhs))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, dst, lhs, rhs);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& dst, VirtualRegister& lhs, VirtualRegister& rhs)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<WasmOpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(dst)
&& Fits<VirtualRegister, __size>::check(lhs)
&& Fits<VirtualRegister, __size>::check(rhs)
&& (__size == OpcodeSize::Wide16 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, dst, lhs, rhs)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide32));
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(dst));
gen->write(Fits<VirtualRegister, __size>::convert(lhs));
gen->write(Fits<VirtualRegister, __size>::convert(rhs));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**f64_lt"[2 - __sizeShiftAmount]);
dumper->dumpOperand("dst", m_dst, true);
dumper->dumpOperand("lhs", m_lhs, false);
dumper->dumpOperand("rhs", m_rhs, false);
}
WasmF64Lt(const uint8_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_lhs(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[1]))
, m_rhs(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmF64Lt(const uint16_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_lhs(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[1]))
, m_rhs(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmF64Lt(const uint32_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_lhs(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[1]))
, m_rhs(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static WasmF64Lt decode(const uint8_t* stream)
{
// `stream` points at the start of the instruction; the wide prefix (if any) and the opcode byte are skipped below, so the constructor receives a pointer to the first operand.
if (*stream == wasm_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == wasm_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide16>(value, func);
else
setDst<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setLhs(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setLhs<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setLhs<OpcodeSize::Wide16>(value, func);
else
setLhs<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setLhs(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setRhs(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setRhs<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setRhs<OpcodeSize::Wide16>(value, func);
else
setRhs<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setRhs(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 2 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
VirtualRegister m_dst;
VirtualRegister m_lhs;
VirtualRegister m_rhs;
};
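// wasm_f64_le: dst = (lhs <= rhs) as an i32; false if either operand is NaN.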
struct WasmF64Le : public Instruction {
static constexpr WasmOpcodeID opcodeID = wasm_f64_le;
static constexpr size_t length = 4;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, dst, lhs, rhs);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, dst, lhs, rhs);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, dst, lhs, rhs))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, dst, lhs, rhs))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, dst, lhs, rhs);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& dst, VirtualRegister& lhs, VirtualRegister& rhs)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<WasmOpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(dst)
&& Fits<VirtualRegister, __size>::check(lhs)
&& Fits<VirtualRegister, __size>::check(rhs)
&& (__size == OpcodeSize::Wide16 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, dst, lhs, rhs)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide32));
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(dst));
gen->write(Fits<VirtualRegister, __size>::convert(lhs));
gen->write(Fits<VirtualRegister, __size>::convert(rhs));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**f64_le"[2 - __sizeShiftAmount]);
dumper->dumpOperand("dst", m_dst, true);
dumper->dumpOperand("lhs", m_lhs, false);
dumper->dumpOperand("rhs", m_rhs, false);
}
WasmF64Le(const uint8_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_lhs(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[1]))
, m_rhs(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmF64Le(const uint16_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_lhs(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[1]))
, m_rhs(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmF64Le(const uint32_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_lhs(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[1]))
, m_rhs(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static WasmF64Le decode(const uint8_t* stream)
{
// `stream` points at the start of the instruction; the wide prefix (if any) and the opcode byte are skipped below, so the constructor receives a pointer to the first operand.
if (*stream == wasm_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == wasm_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide16>(value, func);
else
setDst<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setLhs(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setLhs<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setLhs<OpcodeSize::Wide16>(value, func);
else
setLhs<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setLhs(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setRhs(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setRhs<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setRhs<OpcodeSize::Wide16>(value, func);
else
setRhs<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setRhs(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 2 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
VirtualRegister m_dst;
VirtualRegister m_lhs;
VirtualRegister m_rhs;
};
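// wasm_f64_gt: dst = (lhs > rhs) as an i32; false if either operand is NaN.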
struct WasmF64Gt : public Instruction {
static constexpr WasmOpcodeID opcodeID = wasm_f64_gt;
static constexpr size_t length = 4;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, dst, lhs, rhs);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, dst, lhs, rhs);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, dst, lhs, rhs))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, dst, lhs, rhs))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, dst, lhs, rhs);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& dst, VirtualRegister& lhs, VirtualRegister& rhs)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<WasmOpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(dst)
&& Fits<VirtualRegister, __size>::check(lhs)
&& Fits<VirtualRegister, __size>::check(rhs)
&& (__size == OpcodeSize::Wide16 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, dst, lhs, rhs)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide32));
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(dst));
gen->write(Fits<VirtualRegister, __size>::convert(lhs));
gen->write(Fits<VirtualRegister, __size>::convert(rhs));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**f64_gt"[2 - __sizeShiftAmount]);
dumper->dumpOperand("dst", m_dst, true);
dumper->dumpOperand("lhs", m_lhs, false);
dumper->dumpOperand("rhs", m_rhs, false);
}
WasmF64Gt(const uint8_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_lhs(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[1]))
, m_rhs(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmF64Gt(const uint16_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_lhs(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[1]))
, m_rhs(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmF64Gt(const uint32_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_lhs(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[1]))
, m_rhs(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static WasmF64Gt decode(const uint8_t* stream)
{
// `stream` points at the start of the instruction; the wide prefix (if any) and the opcode byte are skipped below, so the constructor receives a pointer to the first operand.
if (*stream == wasm_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == wasm_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide16>(value, func);
else
setDst<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setLhs(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setLhs<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setLhs<OpcodeSize::Wide16>(value, func);
else
setLhs<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setLhs(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setRhs(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setRhs<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setRhs<OpcodeSize::Wide16>(value, func);
else
setRhs<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setRhs(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 2 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
VirtualRegister m_dst;
VirtualRegister m_lhs;
VirtualRegister m_rhs;
};
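// wasm_f64_ge: dst = (lhs >= rhs) as an i32; false if either operand is NaN.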
struct WasmF64Ge : public Instruction {
static constexpr WasmOpcodeID opcodeID = wasm_f64_ge;
static constexpr size_t length = 4;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, dst, lhs, rhs);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, dst, lhs, rhs);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, dst, lhs, rhs))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, dst, lhs, rhs))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, dst, lhs, rhs);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& dst, VirtualRegister& lhs, VirtualRegister& rhs)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<WasmOpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(dst)
&& Fits<VirtualRegister, __size>::check(lhs)
&& Fits<VirtualRegister, __size>::check(rhs)
&& (__size == OpcodeSize::Wide16 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, dst, lhs, rhs)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide32));
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(dst));
gen->write(Fits<VirtualRegister, __size>::convert(lhs));
gen->write(Fits<VirtualRegister, __size>::convert(rhs));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**f64_ge"[2 - __sizeShiftAmount]);
dumper->dumpOperand("dst", m_dst, true);
dumper->dumpOperand("lhs", m_lhs, false);
dumper->dumpOperand("rhs", m_rhs, false);
}
WasmF64Ge(const uint8_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_lhs(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[1]))
, m_rhs(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmF64Ge(const uint16_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_lhs(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[1]))
, m_rhs(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmF64Ge(const uint32_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_lhs(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[1]))
, m_rhs(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static WasmF64Ge decode(const uint8_t* stream)
{
        // decode() receives the first byte of the instruction; the constructors expect a pointer to the first operand, so skip the wide prefix (if any) and the opcode byte.
if (*stream == wasm_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == wasm_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
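    // Each operand has two setters: the untemplated one dispatches on the
    // instruction's actual width at runtime, and the sized one patches the
    // operand in place at (operand index * operand width) + wide-prefix
    // padding + one opcode byte. If the new value does not fit the current
    // width, the functor supplies a replacement value that does.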
template<typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide16>(value, func);
else
setDst<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setLhs(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setLhs<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setLhs<OpcodeSize::Wide16>(value, func);
else
setLhs<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setLhs(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setRhs(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setRhs<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setRhs<OpcodeSize::Wide16>(value, func);
else
setRhs<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setRhs(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 2 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
VirtualRegister m_dst;
VirtualRegister m_lhs;
VirtualRegister m_rhs;
};
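// Illustrative sketch, not part of the generated interface: a round trip
// through one of these structs. `SketchGenerator` and `emitF64GeSketch` are
// hypothetical names; any generator exposing the recordOpcode()/write()/
// alignWideOpcode16()/alignWideOpcode32() methods used above would do, and
// `pc` is assumed to point at the first byte of an encoded f64.ge.
template<typename SketchGenerator>
void emitF64GeSketch(SketchGenerator* gen, const uint8_t* pc)
{
    // Emit dst = (lhs >= rhs); the emitter picks the narrowest encoding that
    // fits all three registers, falling back to wide16/wide32 as needed.
    WasmF64Ge::emit(gen, VirtualRegister(0), VirtualRegister(1), VirtualRegister(2));
    // Decoding re-materializes the operands into m_dst/m_lhs/m_rhs no matter
    // which width the emitter chose.
    WasmF64Ge decoded = WasmF64Ge::decode(pc);
    UNUSED_PARAM(decoded);
}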
struct WasmI32TruncSF32 : public Instruction {
static constexpr WasmOpcodeID opcodeID = wasm_i32_trunc_s_f32;
static constexpr size_t length = 3;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister operand)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, dst, operand);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister operand)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, dst, operand);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister operand)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, dst, operand))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, dst, operand))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, dst, operand);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& dst, VirtualRegister& operand)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<WasmOpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(dst)
&& Fits<VirtualRegister, __size>::check(operand)
&& (__size == OpcodeSize::Wide16 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister operand)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, dst, operand)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide32));
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(dst));
gen->write(Fits<VirtualRegister, __size>::convert(operand));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**i32_trunc_s_f32"[2 - __sizeShiftAmount]);
dumper->dumpOperand("dst", m_dst, true);
dumper->dumpOperand("operand", m_operand, false);
}
WasmI32TruncSF32(const uint8_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_operand(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[1]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmI32TruncSF32(const uint16_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_operand(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[1]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmI32TruncSF32(const uint32_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_operand(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[1]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static WasmI32TruncSF32 decode(const uint8_t* stream)
{
        // decode() receives the first byte of the instruction; the constructors expect a pointer to the first operand, so skip the wide prefix (if any) and the opcode byte.
if (*stream == wasm_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == wasm_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide16>(value, func);
else
setDst<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setOperand(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setOperand<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setOperand<OpcodeSize::Wide16>(value, func);
else
setOperand<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setOperand(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
VirtualRegister m_dst;
VirtualRegister m_operand;
};
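// Illustrative sketch, not part of the generated interface: patching the
// destination of an instruction in place. `patchTruncDstSketch` is a
// hypothetical helper, and `insn` is assumed to point at the first byte of
// the encoded instruction in the live stream, since setDst() writes the new
// operand through `this`. The functor runs only when `newDst` does not fit
// the instruction's current encoding width.
inline void patchTruncDstSketch(WasmI32TruncSF32* insn, VirtualRegister newDst)
{
    insn->setDst(newDst, [] {
        return VirtualRegister(0); // fallback that fits even a narrow slot
    });
}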
struct WasmI32TruncSF64 : public Instruction {
static constexpr WasmOpcodeID opcodeID = wasm_i32_trunc_s_f64;
static constexpr size_t length = 3;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister operand)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, dst, operand);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister operand)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, dst, operand);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister operand)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, dst, operand))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, dst, operand))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, dst, operand);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& dst, VirtualRegister& operand)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<WasmOpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(dst)
&& Fits<VirtualRegister, __size>::check(operand)
&& (__size == OpcodeSize::Wide16 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister operand)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, dst, operand)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide32));
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(dst));
gen->write(Fits<VirtualRegister, __size>::convert(operand));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**i32_trunc_s_f64"[2 - __sizeShiftAmount]);
dumper->dumpOperand("dst", m_dst, true);
dumper->dumpOperand("operand", m_operand, false);
}
WasmI32TruncSF64(const uint8_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_operand(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[1]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmI32TruncSF64(const uint16_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_operand(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[1]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmI32TruncSF64(const uint32_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_operand(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[1]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static WasmI32TruncSF64 decode(const uint8_t* stream)
{
        // decode() receives the first byte of the instruction; the constructors expect a pointer to the first operand, so skip the wide prefix (if any) and the opcode byte.
if (*stream == wasm_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == wasm_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide16>(value, func);
else
setDst<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setOperand(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setOperand<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setOperand<OpcodeSize::Wide16>(value, func);
else
setOperand<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setOperand(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
VirtualRegister m_dst;
VirtualRegister m_operand;
};
struct WasmI32TruncUF32 : public Instruction {
static constexpr WasmOpcodeID opcodeID = wasm_i32_trunc_u_f32;
static constexpr size_t length = 3;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister operand)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, dst, operand);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister operand)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, dst, operand);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister operand)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, dst, operand))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, dst, operand))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, dst, operand);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& dst, VirtualRegister& operand)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<WasmOpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(dst)
&& Fits<VirtualRegister, __size>::check(operand)
&& (__size == OpcodeSize::Wide16 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister operand)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, dst, operand)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide32));
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(dst));
gen->write(Fits<VirtualRegister, __size>::convert(operand));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**i32_trunc_u_f32"[2 - __sizeShiftAmount]);
dumper->dumpOperand("dst", m_dst, true);
dumper->dumpOperand("operand", m_operand, false);
}
WasmI32TruncUF32(const uint8_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_operand(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[1]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmI32TruncUF32(const uint16_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_operand(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[1]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmI32TruncUF32(const uint32_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_operand(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[1]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static WasmI32TruncUF32 decode(const uint8_t* stream)
{
        // decode() receives the first byte of the instruction; the constructors expect a pointer to the first operand, so skip the wide prefix (if any) and the opcode byte.
if (*stream == wasm_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == wasm_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide16>(value, func);
else
setDst<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setOperand(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setOperand<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setOperand<OpcodeSize::Wide16>(value, func);
else
setOperand<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setOperand(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
VirtualRegister m_dst;
VirtualRegister m_operand;
};
struct WasmI32TruncUF64 : public Instruction {
static constexpr WasmOpcodeID opcodeID = wasm_i32_trunc_u_f64;
static constexpr size_t length = 3;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister operand)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, dst, operand);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister operand)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, dst, operand);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister operand)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, dst, operand))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, dst, operand))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, dst, operand);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& dst, VirtualRegister& operand)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<WasmOpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(dst)
&& Fits<VirtualRegister, __size>::check(operand)
&& (__size == OpcodeSize::Wide16 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister operand)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, dst, operand)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide32));
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(dst));
gen->write(Fits<VirtualRegister, __size>::convert(operand));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**i32_trunc_u_f64"[2 - __sizeShiftAmount]);
dumper->dumpOperand("dst", m_dst, true);
dumper->dumpOperand("operand", m_operand, false);
}
WasmI32TruncUF64(const uint8_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_operand(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[1]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmI32TruncUF64(const uint16_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_operand(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[1]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmI32TruncUF64(const uint32_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_operand(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[1]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static WasmI32TruncUF64 decode(const uint8_t* stream)
{
        // decode() receives the first byte of the instruction; the constructors expect a pointer to the first operand, so skip the wide prefix (if any) and the opcode byte.
if (*stream == wasm_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == wasm_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide16>(value, func);
else
setDst<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setOperand(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setOperand<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setOperand<OpcodeSize::Wide16>(value, func);
else
setOperand<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setOperand(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
VirtualRegister m_dst;
VirtualRegister m_operand;
};
struct WasmI32WrapI64 : public Instruction {
static constexpr WasmOpcodeID opcodeID = wasm_i32_wrap_i64;
static constexpr size_t length = 3;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister operand)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, dst, operand);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister operand)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, dst, operand);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister operand)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, dst, operand))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, dst, operand))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, dst, operand);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& dst, VirtualRegister& operand)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<WasmOpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(dst)
&& Fits<VirtualRegister, __size>::check(operand)
&& (__size == OpcodeSize::Wide16 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister operand)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, dst, operand)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide32));
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(dst));
gen->write(Fits<VirtualRegister, __size>::convert(operand));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**i32_wrap_i64"[2 - __sizeShiftAmount]);
dumper->dumpOperand("dst", m_dst, true);
dumper->dumpOperand("operand", m_operand, false);
}
WasmI32WrapI64(const uint8_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_operand(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[1]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmI32WrapI64(const uint16_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_operand(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[1]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmI32WrapI64(const uint32_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_operand(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[1]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static WasmI32WrapI64 decode(const uint8_t* stream)
{
        // decode() receives the first byte of the instruction; the constructors expect a pointer to the first operand, so skip the wide prefix (if any) and the opcode byte.
if (*stream == wasm_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == wasm_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide16>(value, func);
else
setDst<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setOperand(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setOperand<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setOperand<OpcodeSize::Wide16>(value, func);
else
setOperand<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setOperand(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
VirtualRegister m_dst;
VirtualRegister m_operand;
};
struct WasmI64TruncSF32 : public Instruction {
static constexpr WasmOpcodeID opcodeID = wasm_i64_trunc_s_f32;
static constexpr size_t length = 3;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister operand)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, dst, operand);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister operand)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, dst, operand);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister operand)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, dst, operand))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, dst, operand))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, dst, operand);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& dst, VirtualRegister& operand)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<WasmOpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(dst)
&& Fits<VirtualRegister, __size>::check(operand)
&& (__size == OpcodeSize::Wide16 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister operand)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, dst, operand)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide32));
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(dst));
gen->write(Fits<VirtualRegister, __size>::convert(operand));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**i64_trunc_s_f32"[2 - __sizeShiftAmount]);
dumper->dumpOperand("dst", m_dst, true);
dumper->dumpOperand("operand", m_operand, false);
}
WasmI64TruncSF32(const uint8_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_operand(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[1]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmI64TruncSF32(const uint16_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_operand(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[1]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmI64TruncSF32(const uint32_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_operand(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[1]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static WasmI64TruncSF32 decode(const uint8_t* stream)
{
        // decode() receives the first byte of the instruction; the constructors expect a pointer to the first operand, so skip the wide prefix (if any) and the opcode byte.
if (*stream == wasm_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == wasm_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide16>(value, func);
else
setDst<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setOperand(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setOperand<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setOperand<OpcodeSize::Wide16>(value, func);
else
setOperand<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setOperand(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
VirtualRegister m_dst;
VirtualRegister m_operand;
};
struct WasmI64TruncSF64 : public Instruction {
static constexpr WasmOpcodeID opcodeID = wasm_i64_trunc_s_f64;
static constexpr size_t length = 3;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister operand)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, dst, operand);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister operand)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, dst, operand);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister operand)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, dst, operand))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, dst, operand))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, dst, operand);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& dst, VirtualRegister& operand)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<WasmOpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(dst)
&& Fits<VirtualRegister, __size>::check(operand)
&& (__size == OpcodeSize::Wide16 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister operand)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, dst, operand)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide32));
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(dst));
gen->write(Fits<VirtualRegister, __size>::convert(operand));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**i64_trunc_s_f64"[2 - __sizeShiftAmount]);
dumper->dumpOperand("dst", m_dst, true);
dumper->dumpOperand("operand", m_operand, false);
}
WasmI64TruncSF64(const uint8_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_operand(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[1]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmI64TruncSF64(const uint16_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_operand(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[1]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmI64TruncSF64(const uint32_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_operand(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[1]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static WasmI64TruncSF64 decode(const uint8_t* stream)
{
        // decode() receives the first byte of the instruction; the constructors expect a pointer to the first operand, so skip the wide prefix (if any) and the opcode byte.
if (*stream == wasm_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == wasm_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide16>(value, func);
else
setDst<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setOperand(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setOperand<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setOperand<OpcodeSize::Wide16>(value, func);
else
setOperand<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setOperand(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
VirtualRegister m_dst;
VirtualRegister m_operand;
};
struct WasmI64TruncUF32 : public Instruction {
static constexpr WasmOpcodeID opcodeID = wasm_i64_trunc_u_f32;
static constexpr size_t length = 3;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister operand)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, dst, operand);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister operand)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, dst, operand);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister operand)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, dst, operand))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, dst, operand))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, dst, operand);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& dst, VirtualRegister& operand)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<WasmOpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(dst)
&& Fits<VirtualRegister, __size>::check(operand)
&& (__size == OpcodeSize::Wide16 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister operand)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, dst, operand)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide32));
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(dst));
gen->write(Fits<VirtualRegister, __size>::convert(operand));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**i64_trunc_u_f32"[2 - __sizeShiftAmount]);
dumper->dumpOperand("dst", m_dst, true);
dumper->dumpOperand("operand", m_operand, false);
}
WasmI64TruncUF32(const uint8_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_operand(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[1]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmI64TruncUF32(const uint16_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_operand(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[1]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmI64TruncUF32(const uint32_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_operand(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[1]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static WasmI64TruncUF32 decode(const uint8_t* stream)
{
        // decode() receives the first byte of the instruction; the constructors expect a pointer to the first operand, so skip the wide prefix (if any) and the opcode byte.
if (*stream == wasm_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == wasm_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide16>(value, func);
else
setDst<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setOperand(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setOperand<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setOperand<OpcodeSize::Wide16>(value, func);
else
setOperand<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setOperand(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
VirtualRegister m_dst;
VirtualRegister m_operand;
};
struct WasmI64TruncUF64 : public Instruction {
static constexpr WasmOpcodeID opcodeID = wasm_i64_trunc_u_f64;
static constexpr size_t length = 3;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister operand)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, dst, operand);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister operand)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, dst, operand);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister operand)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, dst, operand))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, dst, operand))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, dst, operand);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& dst, VirtualRegister& operand)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<WasmOpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(dst)
&& Fits<VirtualRegister, __size>::check(operand)
&& (__size == OpcodeSize::Wide16 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister operand)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, dst, operand)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide32));
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(dst));
gen->write(Fits<VirtualRegister, __size>::convert(operand));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**i64_trunc_u_f64"[2 - __sizeShiftAmount]);
dumper->dumpOperand("dst", m_dst, true);
dumper->dumpOperand("operand", m_operand, false);
}
WasmI64TruncUF64(const uint8_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_operand(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[1]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmI64TruncUF64(const uint16_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_operand(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[1]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmI64TruncUF64(const uint32_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_operand(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[1]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static WasmI64TruncUF64 decode(const uint8_t* stream)
{
        // decode() receives the first byte of the instruction; the constructors expect a pointer to the first operand, so skip the wide prefix (if any) and the opcode byte.
if (*stream == wasm_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == wasm_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide16>(value, func);
else
setDst<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setOperand(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setOperand<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setOperand<OpcodeSize::Wide16>(value, func);
else
setOperand<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setOperand(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
VirtualRegister m_dst;
VirtualRegister m_operand;
};
struct WasmI64ExtendSI32 : public Instruction {
static constexpr WasmOpcodeID opcodeID = wasm_i64_extend_s_i32;
static constexpr size_t length = 3;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister operand)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, dst, operand);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister operand)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, dst, operand);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister operand)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, dst, operand))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, dst, operand))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, dst, operand);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& dst, VirtualRegister& operand)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<WasmOpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(dst)
&& Fits<VirtualRegister, __size>::check(operand)
&& (__size == OpcodeSize::Wide16 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister operand)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, dst, operand)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide32));
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(dst));
gen->write(Fits<VirtualRegister, __size>::convert(operand));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**i64_extend_s_i32"[2 - __sizeShiftAmount]);
dumper->dumpOperand("dst", m_dst, true);
dumper->dumpOperand("operand", m_operand, false);
}
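    // The three constructors rehydrate an instruction from a narrow, wide16, or wide32
    // operand stream; `stream` points at the first operand, and the byte immediately
    // before it is asserted to be this opcode.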
WasmI64ExtendSI32(const uint8_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_operand(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[1]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmI64ExtendSI32(const uint16_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_operand(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[1]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmI64ExtendSI32(const uint32_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_operand(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[1]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static WasmI64ExtendSI32 decode(const uint8_t* stream)
{
// The constructor receives a pointer to the first operand (the opcode and any wide prefix are skipped here).
if (*stream == wasm_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == wasm_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide16>(value, func);
else
setDst<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
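        // The operand lives at: operand index * operand width, past the wide-prefix
        // padding (PaddingBySize) and the 1-byte opcode.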
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setOperand(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setOperand<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setOperand<OpcodeSize::Wide16>(value, func);
else
setOperand<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setOperand(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
VirtualRegister m_dst;
VirtualRegister m_operand;
};
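// wasm_i64_extend_u_i32: zero-extends a 32-bit integer operand to 64 bits.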
struct WasmI64ExtendUI32 : public Instruction {
static constexpr WasmOpcodeID opcodeID = wasm_i64_extend_u_i32;
static constexpr size_t length = 3;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister operand)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, dst, operand);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister operand)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, dst, operand);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister operand)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, dst, operand))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, dst, operand))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, dst, operand);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& dst, VirtualRegister& operand)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<WasmOpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(dst)
&& Fits<VirtualRegister, __size>::check(operand)
&& (__size == OpcodeSize::Wide16 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister operand)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, dst, operand)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide32));
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(dst));
gen->write(Fits<VirtualRegister, __size>::convert(operand));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**i64_extend_u_i32"[2 - __sizeShiftAmount]);
dumper->dumpOperand("dst", m_dst, true);
dumper->dumpOperand("operand", m_operand, false);
}
WasmI64ExtendUI32(const uint8_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_operand(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[1]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmI64ExtendUI32(const uint16_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_operand(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[1]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmI64ExtendUI32(const uint32_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_operand(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[1]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static WasmI64ExtendUI32 decode(const uint8_t* stream)
{
// The constructor receives a pointer to the first operand (the opcode and any wide prefix are skipped here).
if (*stream == wasm_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == wasm_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide16>(value, func);
else
setDst<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setOperand(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setOperand<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setOperand<OpcodeSize::Wide16>(value, func);
else
setOperand<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setOperand(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
VirtualRegister m_dst;
VirtualRegister m_operand;
};
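// wasm_f32_convert_s_i32: converts a signed 32-bit integer to float32, rounding to the
// nearest representable value.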
struct WasmF32ConvertSI32 : public Instruction {
static constexpr WasmOpcodeID opcodeID = wasm_f32_convert_s_i32;
static constexpr size_t length = 3;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister operand)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, dst, operand);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister operand)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, dst, operand);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister operand)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, dst, operand))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, dst, operand))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, dst, operand);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& dst, VirtualRegister& operand)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<WasmOpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(dst)
&& Fits<VirtualRegister, __size>::check(operand)
&& (__size == OpcodeSize::Wide16 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister operand)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, dst, operand)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide32));
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(dst));
gen->write(Fits<VirtualRegister, __size>::convert(operand));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**f32_convert_s_i32"[2 - __sizeShiftAmount]);
dumper->dumpOperand("dst", m_dst, true);
dumper->dumpOperand("operand", m_operand, false);
}
WasmF32ConvertSI32(const uint8_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_operand(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[1]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmF32ConvertSI32(const uint16_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_operand(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[1]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmF32ConvertSI32(const uint32_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_operand(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[1]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static WasmF32ConvertSI32 decode(const uint8_t* stream)
{
// The constructor receives a pointer to the first operand (the opcode and any wide prefix are skipped here).
if (*stream == wasm_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == wasm_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide16>(value, func);
else
setDst<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setOperand(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setOperand<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setOperand<OpcodeSize::Wide16>(value, func);
else
setOperand<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setOperand(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
VirtualRegister m_dst;
VirtualRegister m_operand;
};
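// wasm_f32_convert_u_i32: converts an unsigned 32-bit integer to float32, rounding to nearest.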
struct WasmF32ConvertUI32 : public Instruction {
static constexpr WasmOpcodeID opcodeID = wasm_f32_convert_u_i32;
static constexpr size_t length = 3;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister operand)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, dst, operand);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister operand)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, dst, operand);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister operand)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, dst, operand))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, dst, operand))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, dst, operand);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& dst, VirtualRegister& operand)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<WasmOpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(dst)
&& Fits<VirtualRegister, __size>::check(operand)
&& (__size == OpcodeSize::Wide16 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister operand)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, dst, operand)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide32));
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(dst));
gen->write(Fits<VirtualRegister, __size>::convert(operand));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**f32_convert_u_i32"[2 - __sizeShiftAmount]);
dumper->dumpOperand("dst", m_dst, true);
dumper->dumpOperand("operand", m_operand, false);
}
WasmF32ConvertUI32(const uint8_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_operand(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[1]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmF32ConvertUI32(const uint16_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_operand(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[1]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmF32ConvertUI32(const uint32_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_operand(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[1]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static WasmF32ConvertUI32 decode(const uint8_t* stream)
{
// The constructor receives a pointer to the first operand (the opcode and any wide prefix are skipped here).
if (*stream == wasm_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == wasm_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide16>(value, func);
else
setDst<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setOperand(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setOperand<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setOperand<OpcodeSize::Wide16>(value, func);
else
setOperand<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setOperand(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
VirtualRegister m_dst;
VirtualRegister m_operand;
};
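// wasm_f32_convert_s_i64: converts a signed 64-bit integer to float32, rounding to nearest.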
struct WasmF32ConvertSI64 : public Instruction {
static constexpr WasmOpcodeID opcodeID = wasm_f32_convert_s_i64;
static constexpr size_t length = 3;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister operand)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, dst, operand);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister operand)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, dst, operand);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister operand)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, dst, operand))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, dst, operand))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, dst, operand);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& dst, VirtualRegister& operand)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<WasmOpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(dst)
&& Fits<VirtualRegister, __size>::check(operand)
&& (__size == OpcodeSize::Wide16 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister operand)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, dst, operand)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide32));
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(dst));
gen->write(Fits<VirtualRegister, __size>::convert(operand));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**f32_convert_s_i64"[2 - __sizeShiftAmount]);
dumper->dumpOperand("dst", m_dst, true);
dumper->dumpOperand("operand", m_operand, false);
}
WasmF32ConvertSI64(const uint8_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_operand(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[1]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmF32ConvertSI64(const uint16_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_operand(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[1]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmF32ConvertSI64(const uint32_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_operand(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[1]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static WasmF32ConvertSI64 decode(const uint8_t* stream)
{
// The constructor receives a pointer to the first operand (the opcode and any wide prefix are skipped here).
if (*stream == wasm_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == wasm_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide16>(value, func);
else
setDst<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setOperand(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setOperand<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setOperand<OpcodeSize::Wide16>(value, func);
else
setOperand<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setOperand(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
VirtualRegister m_dst;
VirtualRegister m_operand;
};
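// wasm_f32_convert_u_i64: converts an unsigned 64-bit integer to float32, rounding to nearest.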
struct WasmF32ConvertUI64 : public Instruction {
static constexpr WasmOpcodeID opcodeID = wasm_f32_convert_u_i64;
static constexpr size_t length = 3;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister operand)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, dst, operand);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister operand)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, dst, operand);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister operand)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, dst, operand))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, dst, operand))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, dst, operand);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& dst, VirtualRegister& operand)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<WasmOpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(dst)
&& Fits<VirtualRegister, __size>::check(operand)
&& (__size == OpcodeSize::Wide16 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister operand)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, dst, operand)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide32));
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(dst));
gen->write(Fits<VirtualRegister, __size>::convert(operand));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**f32_convert_u_i64"[2 - __sizeShiftAmount]);
dumper->dumpOperand("dst", m_dst, true);
dumper->dumpOperand("operand", m_operand, false);
}
WasmF32ConvertUI64(const uint8_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_operand(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[1]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmF32ConvertUI64(const uint16_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_operand(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[1]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmF32ConvertUI64(const uint32_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_operand(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[1]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static WasmF32ConvertUI64 decode(const uint8_t* stream)
{
// The constructor receives a pointer to the first operand (the opcode and any wide prefix are skipped here).
if (*stream == wasm_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == wasm_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide16>(value, func);
else
setDst<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setOperand(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setOperand<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setOperand<OpcodeSize::Wide16>(value, func);
else
setOperand<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setOperand(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
VirtualRegister m_dst;
VirtualRegister m_operand;
};
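// wasm_f32_demote_f64: narrows a float64 to float32, rounding and possibly losing precision.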
struct WasmF32DemoteF64 : public Instruction {
static constexpr WasmOpcodeID opcodeID = wasm_f32_demote_f64;
static constexpr size_t length = 3;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister operand)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, dst, operand);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister operand)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, dst, operand);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister operand)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, dst, operand))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, dst, operand))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, dst, operand);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& dst, VirtualRegister& operand)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<WasmOpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(dst)
&& Fits<VirtualRegister, __size>::check(operand)
&& (__size == OpcodeSize::Wide16 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister operand)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, dst, operand)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide32));
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(dst));
gen->write(Fits<VirtualRegister, __size>::convert(operand));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**f32_demote_f64"[2 - __sizeShiftAmount]);
dumper->dumpOperand("dst", m_dst, true);
dumper->dumpOperand("operand", m_operand, false);
}
WasmF32DemoteF64(const uint8_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_operand(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[1]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmF32DemoteF64(const uint16_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_operand(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[1]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmF32DemoteF64(const uint32_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_operand(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[1]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static WasmF32DemoteF64 decode(const uint8_t* stream)
{
// The constructor receives a pointer to the first operand (the opcode and any wide prefix are skipped here).
if (*stream == wasm_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == wasm_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide16>(value, func);
else
setDst<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setOperand(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setOperand<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setOperand<OpcodeSize::Wide16>(value, func);
else
setOperand<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setOperand(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
VirtualRegister m_dst;
VirtualRegister m_operand;
};
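// wasm_f32_reinterpret_i32: reinterprets the bits of a 32-bit integer as a float32;
// no numeric conversion takes place.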
struct WasmF32ReinterpretI32 : public Instruction {
static constexpr WasmOpcodeID opcodeID = wasm_f32_reinterpret_i32;
static constexpr size_t length = 3;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister operand)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, dst, operand);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister operand)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, dst, operand);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister operand)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, dst, operand))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, dst, operand))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, dst, operand);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& dst, VirtualRegister& operand)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<WasmOpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(dst)
&& Fits<VirtualRegister, __size>::check(operand)
&& (__size == OpcodeSize::Wide16 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister operand)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, dst, operand)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide32));
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(dst));
gen->write(Fits<VirtualRegister, __size>::convert(operand));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**f32_reinterpret_i32"[2 - __sizeShiftAmount]);
dumper->dumpOperand("dst", m_dst, true);
dumper->dumpOperand("operand", m_operand, false);
}
WasmF32ReinterpretI32(const uint8_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_operand(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[1]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmF32ReinterpretI32(const uint16_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_operand(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[1]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmF32ReinterpretI32(const uint32_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_operand(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[1]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static WasmF32ReinterpretI32 decode(const uint8_t* stream)
{
// The constructor receives a pointer to the first operand (the opcode and any wide prefix are skipped here).
if (*stream == wasm_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == wasm_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide16>(value, func);
else
setDst<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setOperand(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setOperand<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setOperand<OpcodeSize::Wide16>(value, func);
else
setOperand<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setOperand(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
VirtualRegister m_dst;
VirtualRegister m_operand;
};
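// wasm_f64_convert_s_i32: converts a signed 32-bit integer to float64 (always exact).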
struct WasmF64ConvertSI32 : public Instruction {
static constexpr WasmOpcodeID opcodeID = wasm_f64_convert_s_i32;
static constexpr size_t length = 3;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister operand)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, dst, operand);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister operand)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, dst, operand);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister operand)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, dst, operand))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, dst, operand))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, dst, operand);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& dst, VirtualRegister& operand)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<WasmOpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(dst)
&& Fits<VirtualRegister, __size>::check(operand)
&& (__size == OpcodeSize::Wide16 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister operand)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, dst, operand)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide32));
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(dst));
gen->write(Fits<VirtualRegister, __size>::convert(operand));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**f64_convert_s_i32"[2 - __sizeShiftAmount]);
dumper->dumpOperand("dst", m_dst, true);
dumper->dumpOperand("operand", m_operand, false);
}
WasmF64ConvertSI32(const uint8_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_operand(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[1]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmF64ConvertSI32(const uint16_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_operand(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[1]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmF64ConvertSI32(const uint32_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_operand(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[1]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static WasmF64ConvertSI32 decode(const uint8_t* stream)
{
// The constructor receives a pointer to the first operand (the opcode and any wide prefix are skipped here).
if (*stream == wasm_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == wasm_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide16>(value, func);
else
setDst<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setOperand(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setOperand<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setOperand<OpcodeSize::Wide16>(value, func);
else
setOperand<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setOperand(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
VirtualRegister m_dst;
VirtualRegister m_operand;
};
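// wasm_f64_convert_u_i32: converts an unsigned 32-bit integer to float64 (always exact).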
struct WasmF64ConvertUI32 : public Instruction {
static constexpr WasmOpcodeID opcodeID = wasm_f64_convert_u_i32;
static constexpr size_t length = 3;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister operand)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, dst, operand);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister operand)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, dst, operand);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister operand)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, dst, operand))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, dst, operand))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, dst, operand);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& dst, VirtualRegister& operand)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<WasmOpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(dst)
&& Fits<VirtualRegister, __size>::check(operand)
&& (__size == OpcodeSize::Wide16 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister operand)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, dst, operand)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide32));
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(dst));
gen->write(Fits<VirtualRegister, __size>::convert(operand));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**f64_convert_u_i32"[2 - __sizeShiftAmount]);
dumper->dumpOperand("dst", m_dst, true);
dumper->dumpOperand("operand", m_operand, false);
}
WasmF64ConvertUI32(const uint8_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_operand(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[1]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmF64ConvertUI32(const uint16_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_operand(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[1]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmF64ConvertUI32(const uint32_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_operand(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[1]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static WasmF64ConvertUI32 decode(const uint8_t* stream)
{
// The constructor receives a pointer to the first operand (the opcode and any wide prefix are skipped here).
if (*stream == wasm_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == wasm_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide16>(value, func);
else
setDst<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setOperand(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setOperand<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setOperand<OpcodeSize::Wide16>(value, func);
else
setOperand<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setOperand(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
VirtualRegister m_dst;
VirtualRegister m_operand;
};
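// wasm_f64_convert_s_i64: converts a signed 64-bit integer to float64, rounding to nearest.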
struct WasmF64ConvertSI64 : public Instruction {
static constexpr WasmOpcodeID opcodeID = wasm_f64_convert_s_i64;
static constexpr size_t length = 3;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister operand)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, dst, operand);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister operand)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, dst, operand);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister operand)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, dst, operand))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, dst, operand))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, dst, operand);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& dst, VirtualRegister& operand)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<WasmOpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(dst)
&& Fits<VirtualRegister, __size>::check(operand)
&& (__size == OpcodeSize::Wide16 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister operand)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, dst, operand)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide32));
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(dst));
gen->write(Fits<VirtualRegister, __size>::convert(operand));
return true;
}
return false;
}
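// Resulting byte stream, as a sketch assuming one-byte Narrow slots:
//     narrow: [opcode][dst][operand]
//     wide16: [wasm_wide16][opcode][dst:16][operand:16]
//     wide32: [wasm_wide32][opcode][dst:32][operand:32]
// The alignWideOpcode16()/alignWideOpcode32() calls may pad with no-ops
// first so the widened operands land on their natural alignment; whether
// any padding is needed is a platform detail of the generator.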
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**f64_convert_s_i64"[2 - __sizeShiftAmount]);
dumper->dumpOperand("dst", m_dst, true);
dumper->dumpOperand("operand", m_operand, false);
}
WasmF64ConvertSI64(const uint8_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_operand(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[1]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmF64ConvertSI64(const uint16_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_operand(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[1]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmF64ConvertSI64(const uint32_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_operand(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[1]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static WasmF64ConvertSI64 decode(const uint8_t* stream)
{
// stream points at the instruction's first byte; the constructor receives a pointer to the first operand, past the opcode and any wide prefix.
if (*stream == wasm_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == wasm_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
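// setDst() and setOperand() below patch an already-emitted instruction in
// place. The functor is the escape hatch: when the new value does not fit
// the instruction's existing width, func() must produce a substitute value
// that does, because the instruction itself is never re-widened here.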
template<typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide16>(value, func);
else
setDst<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setOperand(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setOperand<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setOperand<OpcodeSize::Wide16>(value, func);
else
setOperand<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setOperand(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
VirtualRegister m_dst;
VirtualRegister m_operand;
};
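// wasm_f64_convert_u_i64: lowers f64.convert_i64_u, converting an
// unsigned 64-bit integer to the nearest IEEE-754 double.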
struct WasmF64ConvertUI64 : public Instruction {
static constexpr WasmOpcodeID opcodeID = wasm_f64_convert_u_i64;
static constexpr size_t length = 3;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister operand)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, dst, operand);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister operand)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, dst, operand);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister operand)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, dst, operand))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, dst, operand))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, dst, operand);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& dst, VirtualRegister& operand)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<WasmOpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(dst)
&& Fits<VirtualRegister, __size>::check(operand)
&& (__size == OpcodeSize::Wide16 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister operand)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, dst, operand)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide32));
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(dst));
gen->write(Fits<VirtualRegister, __size>::convert(operand));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**f64_convert_u_i64"[2 - __sizeShiftAmount]);
dumper->dumpOperand("dst", m_dst, true);
dumper->dumpOperand("operand", m_operand, false);
}
WasmF64ConvertUI64(const uint8_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_operand(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[1]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmF64ConvertUI64(const uint16_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_operand(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[1]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmF64ConvertUI64(const uint32_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_operand(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[1]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static WasmF64ConvertUI64 decode(const uint8_t* stream)
{
// stream points at the instruction's first byte; the constructor receives a pointer to the first operand, past the opcode and any wide prefix.
if (*stream == wasm_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == wasm_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide16>(value, func);
else
setDst<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setOperand(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setOperand<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setOperand<OpcodeSize::Wide16>(value, func);
else
setOperand<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setOperand(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
VirtualRegister m_dst;
VirtualRegister m_operand;
};
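// wasm_f64_promote_f32: lowers f64.promote_f32, widening a single-precision
// float to double precision (exact, apart from NaN payload details).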
struct WasmF64PromoteF32 : public Instruction {
static constexpr WasmOpcodeID opcodeID = wasm_f64_promote_f32;
static constexpr size_t length = 3;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister operand)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, dst, operand);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister operand)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, dst, operand);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister operand)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, dst, operand))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, dst, operand))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, dst, operand);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& dst, VirtualRegister& operand)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<WasmOpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(dst)
&& Fits<VirtualRegister, __size>::check(operand)
&& (__size == OpcodeSize::Wide16 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister operand)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, dst, operand)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide32));
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(dst));
gen->write(Fits<VirtualRegister, __size>::convert(operand));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**f64_promote_f32"[2 - __sizeShiftAmount]);
dumper->dumpOperand("dst", m_dst, true);
dumper->dumpOperand("operand", m_operand, false);
}
WasmF64PromoteF32(const uint8_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_operand(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[1]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmF64PromoteF32(const uint16_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_operand(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[1]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmF64PromoteF32(const uint32_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_operand(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[1]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static WasmF64PromoteF32 decode(const uint8_t* stream)
{
// stream points at the instruction's first byte; the constructor receives a pointer to the first operand, past the opcode and any wide prefix.
if (*stream == wasm_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == wasm_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide16>(value, func);
else
setDst<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setOperand(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setOperand<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setOperand<OpcodeSize::Wide16>(value, func);
else
setOperand<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setOperand(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
VirtualRegister m_dst;
VirtualRegister m_operand;
};
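// wasm_f64_reinterpret_i64: lowers f64.reinterpret_i64, a bit-for-bit
// reinterpretation of a 64-bit integer as a double; no conversion occurs.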
struct WasmF64ReinterpretI64 : public Instruction {
static constexpr WasmOpcodeID opcodeID = wasm_f64_reinterpret_i64;
static constexpr size_t length = 3;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister operand)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, dst, operand);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister operand)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, dst, operand);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister operand)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, dst, operand))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, dst, operand))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, dst, operand);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& dst, VirtualRegister& operand)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<WasmOpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(dst)
&& Fits<VirtualRegister, __size>::check(operand)
&& (__size == OpcodeSize::Wide16 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister operand)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, dst, operand)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide32));
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(dst));
gen->write(Fits<VirtualRegister, __size>::convert(operand));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**f64_reinterpret_i64"[2 - __sizeShiftAmount]);
dumper->dumpOperand("dst", m_dst, true);
dumper->dumpOperand("operand", m_operand, false);
}
WasmF64ReinterpretI64(const uint8_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_operand(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[1]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmF64ReinterpretI64(const uint16_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_operand(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[1]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmF64ReinterpretI64(const uint32_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_operand(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[1]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static WasmF64ReinterpretI64 decode(const uint8_t* stream)
{
// stream points at the instruction's first byte; the constructor receives a pointer to the first operand, past the opcode and any wide prefix.
if (*stream == wasm_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == wasm_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide16>(value, func);
else
setDst<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setOperand(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setOperand<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setOperand<OpcodeSize::Wide16>(value, func);
else
setOperand<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setOperand(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
VirtualRegister m_dst;
VirtualRegister m_operand;
};
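// wasm_i32_reinterpret_f32: lowers i32.reinterpret_f32, the bit-for-bit
// reinterpretation of a single-precision float as a 32-bit integer.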
struct WasmI32ReinterpretF32 : public Instruction {
static constexpr WasmOpcodeID opcodeID = wasm_i32_reinterpret_f32;
static constexpr size_t length = 3;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister operand)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, dst, operand);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister operand)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, dst, operand);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister operand)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, dst, operand))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, dst, operand))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, dst, operand);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& dst, VirtualRegister& operand)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<WasmOpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(dst)
&& Fits<VirtualRegister, __size>::check(operand)
&& (__size == OpcodeSize::Wide16 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister operand)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, dst, operand)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide32));
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(dst));
gen->write(Fits<VirtualRegister, __size>::convert(operand));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**i32_reinterpret_f32"[2 - __sizeShiftAmount]);
dumper->dumpOperand("dst", m_dst, true);
dumper->dumpOperand("operand", m_operand, false);
}
WasmI32ReinterpretF32(const uint8_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_operand(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[1]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmI32ReinterpretF32(const uint16_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_operand(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[1]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmI32ReinterpretF32(const uint32_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_operand(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[1]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static WasmI32ReinterpretF32 decode(const uint8_t* stream)
{
// stream points at the instruction's first byte; the constructor receives a pointer to the first operand, past the opcode and any wide prefix.
if (*stream == wasm_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == wasm_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide16>(value, func);
else
setDst<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setOperand(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setOperand<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setOperand<OpcodeSize::Wide16>(value, func);
else
setOperand<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setOperand(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
VirtualRegister m_dst;
VirtualRegister m_operand;
};
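// wasm_i64_reinterpret_f64: lowers i64.reinterpret_f64, the inverse
// bit-cast, reading a double's bit pattern as a 64-bit integer.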
struct WasmI64ReinterpretF64 : public Instruction {
static constexpr WasmOpcodeID opcodeID = wasm_i64_reinterpret_f64;
static constexpr size_t length = 3;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister operand)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, dst, operand);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister operand)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, dst, operand);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister operand)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, dst, operand))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, dst, operand))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, dst, operand);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& dst, VirtualRegister& operand)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<WasmOpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(dst)
&& Fits<VirtualRegister, __size>::check(operand)
&& (__size == OpcodeSize::Wide16 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister operand)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, dst, operand)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide32));
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(dst));
gen->write(Fits<VirtualRegister, __size>::convert(operand));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**i64_reinterpret_f64"[2 - __sizeShiftAmount]);
dumper->dumpOperand("dst", m_dst, true);
dumper->dumpOperand("operand", m_operand, false);
}
WasmI64ReinterpretF64(const uint8_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_operand(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[1]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmI64ReinterpretF64(const uint16_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_operand(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[1]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmI64ReinterpretF64(const uint32_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_operand(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[1]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static WasmI64ReinterpretF64 decode(const uint8_t* stream)
{
// stream points at the instruction's first byte; the constructor receives a pointer to the first operand, past the opcode and any wide prefix.
if (*stream == wasm_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == wasm_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide16>(value, func);
else
setDst<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setOperand(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setOperand<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setOperand<OpcodeSize::Wide16>(value, func);
else
setOperand<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setOperand(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
VirtualRegister m_dst;
VirtualRegister m_operand;
};
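// wasm_i32_extend8_s: lowers i32.extend8_s, sign-extending the low 8 bits
// of a 32-bit integer across the full width.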
struct WasmI32Extend8S : public Instruction {
static constexpr WasmOpcodeID opcodeID = wasm_i32_extend8_s;
static constexpr size_t length = 3;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister operand)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, dst, operand);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister operand)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, dst, operand);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister operand)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, dst, operand))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, dst, operand))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, dst, operand);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& dst, VirtualRegister& operand)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<WasmOpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(dst)
&& Fits<VirtualRegister, __size>::check(operand)
&& (__size == OpcodeSize::Wide16 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister operand)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, dst, operand)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide32));
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(dst));
gen->write(Fits<VirtualRegister, __size>::convert(operand));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**i32_extend8_s"[2 - __sizeShiftAmount]);
dumper->dumpOperand("dst", m_dst, true);
dumper->dumpOperand("operand", m_operand, false);
}
WasmI32Extend8S(const uint8_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_operand(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[1]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmI32Extend8S(const uint16_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_operand(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[1]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmI32Extend8S(const uint32_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_operand(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[1]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static WasmI32Extend8S decode(const uint8_t* stream)
{
// stream points at the instruction's first byte; the constructor receives a pointer to the first operand, past the opcode and any wide prefix.
if (*stream == wasm_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == wasm_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide16>(value, func);
else
setDst<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setOperand(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setOperand<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setOperand<OpcodeSize::Wide16>(value, func);
else
setOperand<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setOperand(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
VirtualRegister m_dst;
VirtualRegister m_operand;
};
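// wasm_i32_extend16_s: lowers i32.extend16_s, sign-extending the low
// 16 bits of a 32-bit integer.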
struct WasmI32Extend16S : public Instruction {
static constexpr WasmOpcodeID opcodeID = wasm_i32_extend16_s;
static constexpr size_t length = 3;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister operand)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, dst, operand);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister operand)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, dst, operand);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister operand)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, dst, operand))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, dst, operand))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, dst, operand);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& dst, VirtualRegister& operand)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<WasmOpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(dst)
&& Fits<VirtualRegister, __size>::check(operand)
&& (__size == OpcodeSize::Wide16 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister operand)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, dst, operand)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide32));
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(dst));
gen->write(Fits<VirtualRegister, __size>::convert(operand));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**i32_extend16_s"[2 - __sizeShiftAmount]);
dumper->dumpOperand("dst", m_dst, true);
dumper->dumpOperand("operand", m_operand, false);
}
WasmI32Extend16S(const uint8_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_operand(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[1]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmI32Extend16S(const uint16_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_operand(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[1]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmI32Extend16S(const uint32_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_operand(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[1]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static WasmI32Extend16S decode(const uint8_t* stream)
{
// stream points at the instruction's first byte; the constructor receives a pointer to the first operand, past the opcode and any wide prefix.
if (*stream == wasm_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == wasm_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide16>(value, func);
else
setDst<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setOperand(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setOperand<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setOperand<OpcodeSize::Wide16>(value, func);
else
setOperand<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setOperand(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
VirtualRegister m_dst;
VirtualRegister m_operand;
};
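// wasm_i64_extend8_s: lowers i64.extend8_s, sign-extending the low 8 bits
// of a 64-bit integer.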
struct WasmI64Extend8S : public Instruction {
static constexpr WasmOpcodeID opcodeID = wasm_i64_extend8_s;
static constexpr size_t length = 3;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister operand)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, dst, operand);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister operand)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, dst, operand);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister operand)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, dst, operand))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, dst, operand))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, dst, operand);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& dst, VirtualRegister& operand)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<WasmOpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(dst)
&& Fits<VirtualRegister, __size>::check(operand)
&& (__size == OpcodeSize::Wide16 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister operand)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, dst, operand)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide32));
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(dst));
gen->write(Fits<VirtualRegister, __size>::convert(operand));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**i64_extend8_s"[2 - __sizeShiftAmount]);
dumper->dumpOperand("dst", m_dst, true);
dumper->dumpOperand("operand", m_operand, false);
}
WasmI64Extend8S(const uint8_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_operand(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[1]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmI64Extend8S(const uint16_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_operand(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[1]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmI64Extend8S(const uint32_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_operand(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[1]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static WasmI64Extend8S decode(const uint8_t* stream)
{
// stream points at the instruction's first byte; the constructor receives a pointer to the first operand, past the opcode and any wide prefix.
if (*stream == wasm_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == wasm_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide16>(value, func);
else
setDst<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setOperand(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setOperand<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setOperand<OpcodeSize::Wide16>(value, func);
else
setOperand<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setOperand(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
VirtualRegister m_dst;
VirtualRegister m_operand;
};
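// wasm_i64_extend16_s: lowers i64.extend16_s, sign-extending the low
// 16 bits of a 64-bit integer.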
struct WasmI64Extend16S : public Instruction {
static constexpr WasmOpcodeID opcodeID = wasm_i64_extend16_s;
static constexpr size_t length = 3;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister operand)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, dst, operand);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister operand)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, dst, operand);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister operand)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, dst, operand))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, dst, operand))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, dst, operand);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& dst, VirtualRegister& operand)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<WasmOpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(dst)
&& Fits<VirtualRegister, __size>::check(operand)
&& (__size == OpcodeSize::Wide16 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister operand)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, dst, operand)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide32));
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(dst));
gen->write(Fits<VirtualRegister, __size>::convert(operand));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**i64_extend16_s"[2 - __sizeShiftAmount]);
dumper->dumpOperand("dst", m_dst, true);
dumper->dumpOperand("operand", m_operand, false);
}
WasmI64Extend16S(const uint8_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_operand(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[1]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmI64Extend16S(const uint16_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_operand(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[1]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmI64Extend16S(const uint32_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_operand(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[1]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static WasmI64Extend16S decode(const uint8_t* stream)
{
// stream points at the instruction's first byte; the constructor receives a pointer to the first operand, past the opcode and any wide prefix.
if (*stream == wasm_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == wasm_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide16>(value, func);
else
setDst<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setOperand(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setOperand<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setOperand<OpcodeSize::Wide16>(value, func);
else
setOperand<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setOperand(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
VirtualRegister m_dst;
VirtualRegister m_operand;
};
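// wasm_i64_extend32_s: lowers i64.extend32_s, sign-extending the low
// 32 bits of a 64-bit integer.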
struct WasmI64Extend32S : public Instruction {
static constexpr WasmOpcodeID opcodeID = wasm_i64_extend32_s;
static constexpr size_t length = 3;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister operand)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, dst, operand);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister operand)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, dst, operand);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister operand)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, dst, operand))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, dst, operand))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, dst, operand);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& dst, VirtualRegister& operand)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<WasmOpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(dst)
&& Fits<VirtualRegister, __size>::check(operand)
&& (__size == OpcodeSize::Wide16 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister operand)
{
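        // Wide encodings are aligned first, then written as a narrow wide16/wide32 prefix followed by the narrow opcode and the wide operands.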
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, dst, operand)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide32));
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(dst));
gen->write(Fits<VirtualRegister, __size>::convert(operand));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
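        // __sizeShiftAmount is 0/1/2 for narrow/wide16/wide32; indexing the string literal keeps that many leading '*' width markers on the opcode name.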
dumper->printLocationAndOp(__location, &"**i64_extend32_s"[2 - __sizeShiftAmount]);
dumper->dumpOperand("dst", m_dst, true);
dumper->dumpOperand("operand", m_operand, false);
}
WasmI64Extend32S(const uint8_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_operand(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[1]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmI64Extend32S(const uint16_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_operand(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[1]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmI64Extend32S(const uint32_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_operand(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[1]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static WasmI64Extend32S decode(const uint8_t* stream)
{
        // The constructors expect a pointer to the first operand, so skip the opcode and, when present, the wide prefix.
if (*stream == wasm_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == wasm_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide16>(value, func);
else
setDst<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setOperand(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setOperand<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setOperand<OpcodeSize::Wide16>(value, func);
else
setOperand<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setOperand(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
VirtualRegister m_dst;
VirtualRegister m_operand;
};
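// Operand-less pseudo-opcode; presumably a labeled entry point the interpreter jumps to when a slow path needs to throw a Wasm exception.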
struct WasmThrowFromSlowPathTrampoline : public Instruction {
static constexpr WasmOpcodeID opcodeID = wasm_throw_from_slow_path_trampoline;
static constexpr size_t length = 1;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<WasmOpcodeID, __size>::check(opcodeID)
&& (__size == OpcodeSize::Wide16 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide32));
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**throw_from_slow_path_trampoline"[2 - __sizeShiftAmount]);
}
WasmThrowFromSlowPathTrampoline(const uint8_t* stream)
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmThrowFromSlowPathTrampoline(const uint16_t* stream)
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmThrowFromSlowPathTrampoline(const uint32_t* stream)
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static WasmThrowFromSlowPathTrampoline decode(const uint8_t* stream)
{
        // The constructors expect a pointer to the first operand, so skip the opcode and, when present, the wide prefix.
if (*stream == wasm_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == wasm_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
};
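// Operand-less pseudo-opcode; presumably the throw entry point used after a hardware memory fault (e.g. an out-of-bounds access) is caught.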
struct WasmThrowFromFaultHandlerTrampoline : public Instruction {
static constexpr WasmOpcodeID opcodeID = wasm_throw_from_fault_handler_trampoline;
static constexpr size_t length = 1;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<WasmOpcodeID, __size>::check(opcodeID)
&& (__size == OpcodeSize::Wide16 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide32));
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**throw_from_fault_handler_trampoline"[2 - __sizeShiftAmount]);
}
WasmThrowFromFaultHandlerTrampoline(const uint8_t* stream)
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmThrowFromFaultHandlerTrampoline(const uint16_t* stream)
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmThrowFromFaultHandlerTrampoline(const uint32_t* stream)
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static WasmThrowFromFaultHandlerTrampoline decode(const uint8_t* stream)
{
        // The constructors expect a pointer to the first operand, so skip the opcode and, when present, the wide prefix.
if (*stream == wasm_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == wasm_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
};
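// Pseudo-opcode that appears to mark where execution resumes after a wasm call returns.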
struct WasmCallReturnLocation : public Instruction {
static constexpr WasmOpcodeID opcodeID = wasm_call_return_location;
static constexpr size_t length = 1;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<WasmOpcodeID, __size>::check(opcodeID)
&& (__size == OpcodeSize::Wide16 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide32));
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**call_return_location"[2 - __sizeShiftAmount]);
}
WasmCallReturnLocation(const uint8_t* stream)
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmCallReturnLocation(const uint16_t* stream)
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmCallReturnLocation(const uint32_t* stream)
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static WasmCallReturnLocation decode(const uint8_t* stream)
{
        // The constructors expect a pointer to the first operand, so skip the opcode and, when present, the wide prefix.
if (*stream == wasm_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == wasm_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
};
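// Return-location marker, presumably for the call variant that does not reload the instance from thread-local storage.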
struct WasmCallNoTlsReturnLocation : public Instruction {
static constexpr WasmOpcodeID opcodeID = wasm_call_no_tls_return_location;
static constexpr size_t length = 1;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<WasmOpcodeID, __size>::check(opcodeID)
&& (__size == OpcodeSize::Wide16 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide32));
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**call_no_tls_return_location"[2 - __sizeShiftAmount]);
}
WasmCallNoTlsReturnLocation(const uint8_t* stream)
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmCallNoTlsReturnLocation(const uint16_t* stream)
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmCallNoTlsReturnLocation(const uint32_t* stream)
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static WasmCallNoTlsReturnLocation decode(const uint8_t* stream)
{
        // The constructors expect a pointer to the first operand, so skip the opcode and, when present, the wide prefix.
if (*stream == wasm_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == wasm_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
};
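// Return-location marker for call_indirect.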
struct WasmCallIndirectReturnLocation : public Instruction {
static constexpr WasmOpcodeID opcodeID = wasm_call_indirect_return_location;
static constexpr size_t length = 1;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<WasmOpcodeID, __size>::check(opcodeID)
&& (__size == OpcodeSize::Wide16 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide32));
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**call_indirect_return_location"[2 - __sizeShiftAmount]);
}
WasmCallIndirectReturnLocation(const uint8_t* stream)
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmCallIndirectReturnLocation(const uint16_t* stream)
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmCallIndirectReturnLocation(const uint32_t* stream)
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static WasmCallIndirectReturnLocation decode(const uint8_t* stream)
{
        // The constructors expect a pointer to the first operand, so skip the opcode and, when present, the wide prefix.
if (*stream == wasm_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == wasm_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
};
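// Return-location marker, presumably for the no-TLS variant of call_indirect.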
struct WasmCallIndirectNoTlsReturnLocation : public Instruction {
static constexpr WasmOpcodeID opcodeID = wasm_call_indirect_no_tls_return_location;
static constexpr size_t length = 1;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<WasmOpcodeID, __size>::check(opcodeID)
&& (__size == OpcodeSize::Wide16 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide32));
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**call_indirect_no_tls_return_location"[2 - __sizeShiftAmount]);
}
WasmCallIndirectNoTlsReturnLocation(const uint8_t* stream)
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmCallIndirectNoTlsReturnLocation(const uint16_t* stream)
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmCallIndirectNoTlsReturnLocation(const uint32_t* stream)
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static WasmCallIndirectNoTlsReturnLocation decode(const uint8_t* stream)
{
        // The constructors expect a pointer to the first operand, so skip the opcode and, when present, the wide prefix.
if (*stream == wasm_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == wasm_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
};
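// Prefix opcode: the emitters above write wasm_wide16 before an opcode whose operands are encoded as 16-bit values.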
struct WasmWide16 : public Instruction {
static constexpr WasmOpcodeID opcodeID = wasm_wide16;
static constexpr size_t length = 1;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<WasmOpcodeID, __size>::check(opcodeID)
&& (__size == OpcodeSize::Wide16 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide32));
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**wide16"[2 - __sizeShiftAmount]);
}
WasmWide16(const uint8_t* stream)
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmWide16(const uint16_t* stream)
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmWide16(const uint32_t* stream)
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static WasmWide16 decode(const uint8_t* stream)
{
        // The constructors expect a pointer to the first operand, so skip the opcode and, when present, the wide prefix.
if (*stream == wasm_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == wasm_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
};
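// Prefix opcode for instructions whose operands are encoded as 32-bit values.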
struct WasmWide32 : public Instruction {
static constexpr WasmOpcodeID opcodeID = wasm_wide32;
static constexpr size_t length = 1;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<WasmOpcodeID, __size>::check(opcodeID)
&& (__size == OpcodeSize::Wide16 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide32));
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**wide32"[2 - __sizeShiftAmount]);
}
WasmWide32(const uint8_t* stream)
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmWide32(const uint16_t* stream)
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmWide32(const uint32_t* stream)
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static WasmWide32 decode(const uint8_t* stream)
{
        // The constructors expect a pointer to the first operand, so skip the opcode and, when present, the wide prefix.
if (*stream == wasm_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == wasm_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
};
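// Function prologue opcode, executed once on entry to a wasm function.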
struct WasmEnter : public Instruction {
static constexpr WasmOpcodeID opcodeID = wasm_enter;
static constexpr size_t length = 1;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<WasmOpcodeID, __size>::check(opcodeID)
&& (__size == OpcodeSize::Wide16 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide32));
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**enter"[2 - __sizeShiftAmount]);
}
WasmEnter(const uint8_t* stream)
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmEnter(const uint16_t* stream)
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmEnter(const uint32_t* stream)
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static WasmEnter decode(const uint8_t* stream)
{
        // The constructors expect a pointer to the first operand, so skip the opcode and, when present, the wide prefix.
if (*stream == wasm_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == wasm_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
};
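// No-op: occupies one opcode slot and has no effect when executed.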
struct WasmNop : public Instruction {
static constexpr WasmOpcodeID opcodeID = wasm_nop;
static constexpr size_t length = 1;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<WasmOpcodeID, __size>::check(opcodeID)
&& (__size == OpcodeSize::Wide16 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide32));
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**nop"[2 - __sizeShiftAmount]);
}
WasmNop(const uint8_t* stream)
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmNop(const uint16_t* stream)
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmNop(const uint32_t* stream)
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static WasmNop decode(const uint8_t* stream)
{
        // The constructors expect a pointer to the first operand, so skip the opcode and, when present, the wide prefix.
if (*stream == wasm_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == wasm_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
};
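// Emitted at loop back-edges; likely serves as the interpreter's check point for tiering up hot loops.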
struct WasmLoopHint : public Instruction {
static constexpr WasmOpcodeID opcodeID = wasm_loop_hint;
static constexpr size_t length = 1;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<WasmOpcodeID, __size>::check(opcodeID)
&& (__size == OpcodeSize::Wide16 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide32));
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**loop_hint"[2 - __sizeShiftAmount]);
}
WasmLoopHint(const uint8_t* stream)
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmLoopHint(const uint16_t* stream)
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmLoopHint(const uint32_t* stream)
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static WasmLoopHint decode(const uint8_t* stream)
{
        // The constructors expect a pointer to the first operand, so skip the opcode and, when present, the wide prefix.
if (*stream == wasm_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == wasm_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
};
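// Register-to-register move: copies src into dst.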
struct WasmMov : public Instruction {
static constexpr WasmOpcodeID opcodeID = wasm_mov;
static constexpr size_t length = 3;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister src)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, dst, src);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister src)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, dst, src);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister src)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, dst, src))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, dst, src))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, dst, src);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& dst, VirtualRegister& src)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<WasmOpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(dst)
&& Fits<VirtualRegister, __size>::check(src)
&& (__size == OpcodeSize::Wide16 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister src)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, dst, src)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide32));
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(dst));
gen->write(Fits<VirtualRegister, __size>::convert(src));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**mov"[2 - __sizeShiftAmount]);
dumper->dumpOperand("dst", m_dst, true);
dumper->dumpOperand("src", m_src, false);
}
WasmMov(const uint8_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_src(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[1]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmMov(const uint16_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_src(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[1]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmMov(const uint32_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_src(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[1]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static WasmMov decode(const uint8_t* stream)
{
        // The constructors expect a pointer to the first operand, so skip the opcode and, when present, the wide prefix.
if (*stream == wasm_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == wasm_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide16>(value, func);
else
setDst<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setSrc(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setSrc<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setSrc<OpcodeSize::Wide16>(value, func);
else
setSrc<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setSrc(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
VirtualRegister m_dst;
VirtualRegister m_src;
};
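// Conditional branch: jumps to targetLabel when condition is non-zero.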
struct WasmJtrue : public Instruction {
static constexpr WasmOpcodeID opcodeID = wasm_jtrue;
static constexpr size_t length = 3;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister condition, WasmBoundLabel targetLabel)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, condition, targetLabel);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister condition, WasmBoundLabel targetLabel)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, condition, targetLabel);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister condition, WasmBoundLabel targetLabel)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, condition, targetLabel))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, condition, targetLabel))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, condition, targetLabel);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& condition, WasmBoundLabel& targetLabel)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<WasmOpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(condition)
&& Fits<WasmBoundLabel, __size>::check(targetLabel)
&& (__size == OpcodeSize::Wide16 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister condition, WasmBoundLabel targetLabel)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, condition, targetLabel)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide32));
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(condition));
gen->write(Fits<WasmBoundLabel, __size>::convert(targetLabel));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**jtrue"[2 - __sizeShiftAmount]);
dumper->dumpOperand("condition", m_condition, true);
dumper->dumpOperand("targetLabel", m_targetLabel, false);
}
WasmJtrue(const uint8_t* stream)
: m_condition(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_targetLabel(Fits<WasmBoundLabel, OpcodeSize::Narrow>::convert(stream[1]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmJtrue(const uint16_t* stream)
: m_condition(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_targetLabel(Fits<WasmBoundLabel, OpcodeSize::Wide16>::convert(stream[1]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmJtrue(const uint32_t* stream)
: m_condition(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_targetLabel(Fits<WasmBoundLabel, OpcodeSize::Wide32>::convert(stream[1]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static WasmJtrue decode(const uint8_t* stream)
{
        // The constructors expect a pointer to the first operand, so skip the opcode and, when present, the wide prefix.
if (*stream == wasm_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == wasm_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setCondition(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setCondition<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setCondition<OpcodeSize::Wide16>(value, func);
else
setCondition<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setCondition(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setTargetLabel(WasmBoundLabel value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setTargetLabel<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setTargetLabel<OpcodeSize::Wide16>(value, func);
else
setTargetLabel<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setTargetLabel(WasmBoundLabel value, Functor func)
{
if (!Fits<WasmBoundLabel, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<WasmBoundLabel, size>::convert(value);
}
VirtualRegister m_condition;
WasmBoundLabel m_targetLabel;
};
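// Conditional branch: jumps to targetLabel when condition is zero.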
struct WasmJfalse : public Instruction {
static constexpr WasmOpcodeID opcodeID = wasm_jfalse;
static constexpr size_t length = 3;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister condition, WasmBoundLabel targetLabel)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, condition, targetLabel);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister condition, WasmBoundLabel targetLabel)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, condition, targetLabel);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister condition, WasmBoundLabel targetLabel)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, condition, targetLabel))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, condition, targetLabel))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, condition, targetLabel);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& condition, WasmBoundLabel& targetLabel)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<WasmOpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(condition)
&& Fits<WasmBoundLabel, __size>::check(targetLabel)
&& (__size == OpcodeSize::Wide16 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister condition, WasmBoundLabel targetLabel)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, condition, targetLabel)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide32));
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(condition));
gen->write(Fits<WasmBoundLabel, __size>::convert(targetLabel));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**jfalse"[2 - __sizeShiftAmount]);
dumper->dumpOperand("condition", m_condition, true);
dumper->dumpOperand("targetLabel", m_targetLabel, false);
}
WasmJfalse(const uint8_t* stream)
: m_condition(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_targetLabel(Fits<WasmBoundLabel, OpcodeSize::Narrow>::convert(stream[1]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmJfalse(const uint16_t* stream)
: m_condition(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_targetLabel(Fits<WasmBoundLabel, OpcodeSize::Wide16>::convert(stream[1]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmJfalse(const uint32_t* stream)
: m_condition(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_targetLabel(Fits<WasmBoundLabel, OpcodeSize::Wide32>::convert(stream[1]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static WasmJfalse decode(const uint8_t* stream)
{
        // The constructors expect a pointer to the first operand, so skip the opcode and, when present, the wide prefix.
if (*stream == wasm_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == wasm_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setCondition(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setCondition<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setCondition<OpcodeSize::Wide16>(value, func);
else
setCondition<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setCondition(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setTargetLabel(WasmBoundLabel value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setTargetLabel<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setTargetLabel<OpcodeSize::Wide16>(value, func);
else
setTargetLabel<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setTargetLabel(WasmBoundLabel value, Functor func)
{
if (!Fits<WasmBoundLabel, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<WasmBoundLabel, size>::convert(value);
}
VirtualRegister m_condition;
WasmBoundLabel m_targetLabel;
};
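// Unconditional branch to targetLabel.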
struct WasmJmp : public Instruction {
static constexpr WasmOpcodeID opcodeID = wasm_jmp;
static constexpr size_t length = 2;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, WasmBoundLabel targetLabel)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, targetLabel);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, WasmBoundLabel targetLabel)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, targetLabel);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, WasmBoundLabel targetLabel)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, targetLabel))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, targetLabel))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, targetLabel);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, WasmBoundLabel& targetLabel)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<WasmOpcodeID, __size>::check(opcodeID)
&& Fits<WasmBoundLabel, __size>::check(targetLabel)
&& (__size == OpcodeSize::Wide16 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, WasmBoundLabel targetLabel)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, targetLabel)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide32));
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<WasmBoundLabel, __size>::convert(targetLabel));
return true;
}
return false;
}
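    // emitImpl() above writes, in order: optional alignment padding, a
    // one-byte wide prefix (Wide16/Wide32 only), the one-byte opcode, then
    // each operand at the chosen width. checkImpl() runs first, so a failed
    // fit leaves nothing but the padding behind and the caller can retry at
    // the next width up.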
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**jmp"[2 - __sizeShiftAmount]);
dumper->dumpOperand("targetLabel", m_targetLabel, true);
}
WasmJmp(const uint8_t* stream)
: m_targetLabel(Fits<WasmBoundLabel, OpcodeSize::Narrow>::convert(stream[0]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmJmp(const uint16_t* stream)
: m_targetLabel(Fits<WasmBoundLabel, OpcodeSize::Wide16>::convert(stream[0]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmJmp(const uint32_t* stream)
: m_targetLabel(Fits<WasmBoundLabel, OpcodeSize::Wide32>::convert(stream[0]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static WasmJmp decode(const uint8_t* stream)
{
        // stream points at the wide prefix or the opcode byte; advance past it so the constructor receives a pointer to the first operand.
if (*stream == wasm_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == wasm_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setTargetLabel(WasmBoundLabel value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setTargetLabel<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setTargetLabel<OpcodeSize::Wide16>(value, func);
else
setTargetLabel<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setTargetLabel(WasmBoundLabel value, Functor func)
{
if (!Fits<WasmBoundLabel, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<WasmBoundLabel, size>::convert(value);
}
WasmBoundLabel m_targetLabel;
};
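// Emission escalates through widths: WasmJmp::emit() defers to
// emitWithSmallestSizeRequirement(), which tries Narrow, then Wide16, then
// Wide32 with an assertion that the widest form fits. The common call is just
// (assuming `gen` satisfies the BytecodeGenerator template parameter):
//
//   WasmJmp::emit(&gen, targetLabel); // smallest width that fits wins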
struct WasmRet : public Instruction {
static constexpr WasmOpcodeID opcodeID = wasm_ret;
static constexpr size_t length = 1;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<WasmOpcodeID, __size>::check(opcodeID)
&& (__size == OpcodeSize::Wide16 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide32));
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**ret"[2 - __sizeShiftAmount]);
}
WasmRet(const uint8_t* stream)
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmRet(const uint16_t* stream)
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmRet(const uint32_t* stream)
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static WasmRet decode(const uint8_t* stream)
{
        // stream points at the wide prefix or the opcode byte; advance past it so the constructor receives a pointer to the first operand.
if (*stream == wasm_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == wasm_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
};
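// WasmRet encodes as a bare opcode byte (length == 1). Its constructors only
// assert that the byte before the operand pointer is the expected opcode, and
// decode() still checks for a wide prefix because a caller may request a
// wider emission explicitly even when there are no operands to widen.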
struct WasmSwitch : public Instruction {
static constexpr WasmOpcodeID opcodeID = wasm_switch;
static constexpr size_t length = 3;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister scrutinee, unsigned tableIndex)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, scrutinee, tableIndex);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister scrutinee, unsigned tableIndex)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, scrutinee, tableIndex);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister scrutinee, unsigned tableIndex)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, scrutinee, tableIndex))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, scrutinee, tableIndex))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, scrutinee, tableIndex);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& scrutinee, unsigned& tableIndex)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<WasmOpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(scrutinee)
&& Fits<unsigned, __size>::check(tableIndex)
&& (__size == OpcodeSize::Wide16 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister scrutinee, unsigned tableIndex)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, scrutinee, tableIndex)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide32));
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(scrutinee));
gen->write(Fits<unsigned, __size>::convert(tableIndex));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**switch"[2 - __sizeShiftAmount]);
dumper->dumpOperand("scrutinee", m_scrutinee, true);
dumper->dumpOperand("tableIndex", m_tableIndex, false);
}
WasmSwitch(const uint8_t* stream)
: m_scrutinee(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_tableIndex(Fits<unsigned, OpcodeSize::Narrow>::convert(stream[1]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmSwitch(const uint16_t* stream)
: m_scrutinee(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_tableIndex(Fits<unsigned, OpcodeSize::Wide16>::convert(stream[1]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmSwitch(const uint32_t* stream)
: m_scrutinee(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_tableIndex(Fits<unsigned, OpcodeSize::Wide32>::convert(stream[1]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static WasmSwitch decode(const uint8_t* stream)
{
        // stream points at the wide prefix or the opcode byte; advance past it so the constructor receives a pointer to the first operand.
if (*stream == wasm_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == wasm_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setScrutinee(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setScrutinee<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setScrutinee<OpcodeSize::Wide16>(value, func);
else
setScrutinee<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setScrutinee(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setTableIndex(unsigned value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setTableIndex<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setTableIndex<OpcodeSize::Wide16>(value, func);
else
setTableIndex<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setTableIndex(unsigned value, Functor func)
{
if (!Fits<unsigned, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<unsigned, size>::convert(value);
}
VirtualRegister m_scrutinee;
unsigned m_tableIndex;
};
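// wasm_switch encodes only the scrutinee register and a tableIndex; the jump
// table itself is presumably kept in a side structure by the enclosing
// generator, which is why the index can go through the same Fits<> width
// checks as any other immediate.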
struct WasmUnreachable : public Instruction {
static constexpr WasmOpcodeID opcodeID = wasm_unreachable;
static constexpr size_t length = 1;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<WasmOpcodeID, __size>::check(opcodeID)
&& (__size == OpcodeSize::Wide16 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide32));
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**unreachable"[2 - __sizeShiftAmount]);
}
WasmUnreachable(const uint8_t* stream)
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmUnreachable(const uint16_t* stream)
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmUnreachable(const uint32_t* stream)
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static WasmUnreachable decode(const uint8_t* stream)
{
        // stream points at the wide prefix or the opcode byte; advance past it so the constructor receives a pointer to the first operand.
if (*stream == wasm_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == wasm_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
};
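// wasm_unreachable carries no operands; mirroring the core WebAssembly
// `unreachable` instruction, reaching it at run time is expected to trap, so
// there is nothing for the encoder to record beyond the opcode itself.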
struct WasmRetVoid : public Instruction {
static constexpr WasmOpcodeID opcodeID = wasm_ret_void;
static constexpr size_t length = 1;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<WasmOpcodeID, __size>::check(opcodeID)
&& (__size == OpcodeSize::Wide16 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide32));
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**ret_void"[2 - __sizeShiftAmount]);
}
WasmRetVoid(const uint8_t* stream)
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmRetVoid(const uint16_t* stream)
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmRetVoid(const uint32_t* stream)
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static WasmRetVoid decode(const uint8_t* stream)
{
        // stream points at the wide prefix or the opcode byte; advance past it so the constructor receives a pointer to the first operand.
if (*stream == wasm_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == wasm_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
};
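// wasm_ret_void is the zero-operand sibling of wasm_ret above; the two share
// an identical encoding shape and presumably differ only in whether the
// interpreter materializes return values while unwinding.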
struct WasmDropKeep : public Instruction {
static constexpr WasmOpcodeID opcodeID = wasm_drop_keep;
static constexpr size_t length = 4;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, unsigned startOffset, unsigned dropCount, unsigned keepCount)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, startOffset, dropCount, keepCount);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, unsigned startOffset, unsigned dropCount, unsigned keepCount)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, startOffset, dropCount, keepCount);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, unsigned startOffset, unsigned dropCount, unsigned keepCount)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, startOffset, dropCount, keepCount))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, startOffset, dropCount, keepCount))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, startOffset, dropCount, keepCount);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, unsigned& startOffset, unsigned& dropCount, unsigned& keepCount)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<WasmOpcodeID, __size>::check(opcodeID)
&& Fits<unsigned, __size>::check(startOffset)
&& Fits<unsigned, __size>::check(dropCount)
&& Fits<unsigned, __size>::check(keepCount)
&& (__size == OpcodeSize::Wide16 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, unsigned startOffset, unsigned dropCount, unsigned keepCount)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, startOffset, dropCount, keepCount)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide32));
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<unsigned, __size>::convert(startOffset));
gen->write(Fits<unsigned, __size>::convert(dropCount));
gen->write(Fits<unsigned, __size>::convert(keepCount));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**drop_keep"[2 - __sizeShiftAmount]);
dumper->dumpOperand("startOffset", m_startOffset, true);
dumper->dumpOperand("dropCount", m_dropCount, false);
dumper->dumpOperand("keepCount", m_keepCount, false);
}
WasmDropKeep(const uint8_t* stream)
: m_startOffset(Fits<unsigned, OpcodeSize::Narrow>::convert(stream[0]))
, m_dropCount(Fits<unsigned, OpcodeSize::Narrow>::convert(stream[1]))
, m_keepCount(Fits<unsigned, OpcodeSize::Narrow>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmDropKeep(const uint16_t* stream)
: m_startOffset(Fits<unsigned, OpcodeSize::Wide16>::convert(stream[0]))
, m_dropCount(Fits<unsigned, OpcodeSize::Wide16>::convert(stream[1]))
, m_keepCount(Fits<unsigned, OpcodeSize::Wide16>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmDropKeep(const uint32_t* stream)
: m_startOffset(Fits<unsigned, OpcodeSize::Wide32>::convert(stream[0]))
, m_dropCount(Fits<unsigned, OpcodeSize::Wide32>::convert(stream[1]))
, m_keepCount(Fits<unsigned, OpcodeSize::Wide32>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static WasmDropKeep decode(const uint8_t* stream)
{
        // stream points at the wide prefix or the opcode byte; advance past it so the constructor receives a pointer to the first operand.
if (*stream == wasm_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == wasm_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setStartOffset(unsigned value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setStartOffset<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setStartOffset<OpcodeSize::Wide16>(value, func);
else
setStartOffset<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setStartOffset(unsigned value, Functor func)
{
if (!Fits<unsigned, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<unsigned, size>::convert(value);
}
template<typename Functor>
void setDropCount(unsigned value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setDropCount<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setDropCount<OpcodeSize::Wide16>(value, func);
else
setDropCount<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setDropCount(unsigned value, Functor func)
{
if (!Fits<unsigned, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<unsigned, size>::convert(value);
}
template<typename Functor>
void setKeepCount(unsigned value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setKeepCount<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setKeepCount<OpcodeSize::Wide16>(value, func);
else
setKeepCount<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setKeepCount(unsigned value, Functor func)
{
if (!Fits<unsigned, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 2 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<unsigned, size>::convert(value);
}
unsigned m_startOffset;
unsigned m_dropCount;
unsigned m_keepCount;
};
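// A hedged reading of wasm_drop_keep, inferred from its operand names: the
// dropCount stack slots starting at startOffset are discarded and the
// keepCount slots above them slide down to close the gap; for example, with
// startOffset = 4, dropCount = 2, keepCount = 1, the value in slot 6 would
// move down to slot 4. Emission follows the usual pattern:
//
//   WasmDropKeep::emit(&gen, startOffset, dropCount, keepCount);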
struct WasmRefIsNull : public Instruction {
static constexpr WasmOpcodeID opcodeID = wasm_ref_is_null;
static constexpr size_t length = 3;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister ref)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, dst, ref);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister ref)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, dst, ref);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister ref)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, dst, ref))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, dst, ref))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, dst, ref);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& dst, VirtualRegister& ref)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<WasmOpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(dst)
&& Fits<VirtualRegister, __size>::check(ref)
&& (__size == OpcodeSize::Wide16 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister ref)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, dst, ref)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide32));
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(dst));
gen->write(Fits<VirtualRegister, __size>::convert(ref));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**ref_is_null"[2 - __sizeShiftAmount]);
dumper->dumpOperand("dst", m_dst, true);
dumper->dumpOperand("ref", m_ref, false);
}
WasmRefIsNull(const uint8_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_ref(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[1]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmRefIsNull(const uint16_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_ref(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[1]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmRefIsNull(const uint32_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_ref(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[1]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static WasmRefIsNull decode(const uint8_t* stream)
{
        // stream points at the wide prefix or the opcode byte; advance past it so the constructor receives a pointer to the first operand.
if (*stream == wasm_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == wasm_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide16>(value, func);
else
setDst<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setRef(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setRef<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setRef<OpcodeSize::Wide16>(value, func);
else
setRef<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setRef(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
VirtualRegister m_dst;
VirtualRegister m_ref;
};
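// wasm_ref_is_null mirrors the WebAssembly `ref.is_null` instruction: it
// reads the reference held in `ref` and stores a boolean answer into `dst`.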
struct WasmRefFunc : public Instruction {
static constexpr WasmOpcodeID opcodeID = wasm_ref_func;
static constexpr size_t length = 3;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister dst, unsigned functionIndex)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, dst, functionIndex);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, unsigned functionIndex)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, dst, functionIndex);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister dst, unsigned functionIndex)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, dst, functionIndex))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, dst, functionIndex))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, dst, functionIndex);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& dst, unsigned& functionIndex)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<WasmOpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(dst)
&& Fits<unsigned, __size>::check(functionIndex)
&& (__size == OpcodeSize::Wide16 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister dst, unsigned functionIndex)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, dst, functionIndex)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide32));
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(dst));
gen->write(Fits<unsigned, __size>::convert(functionIndex));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**ref_func"[2 - __sizeShiftAmount]);
dumper->dumpOperand("dst", m_dst, true);
dumper->dumpOperand("functionIndex", m_functionIndex, false);
}
WasmRefFunc(const uint8_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_functionIndex(Fits<unsigned, OpcodeSize::Narrow>::convert(stream[1]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmRefFunc(const uint16_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_functionIndex(Fits<unsigned, OpcodeSize::Wide16>::convert(stream[1]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmRefFunc(const uint32_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_functionIndex(Fits<unsigned, OpcodeSize::Wide32>::convert(stream[1]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static WasmRefFunc decode(const uint8_t* stream)
{
        // stream points at the wide prefix or the opcode byte; advance past it so the constructor receives a pointer to the first operand.
if (*stream == wasm_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == wasm_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide16>(value, func);
else
setDst<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setFunctionIndex(unsigned value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setFunctionIndex<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setFunctionIndex<OpcodeSize::Wide16>(value, func);
else
setFunctionIndex<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setFunctionIndex(unsigned value, Functor func)
{
if (!Fits<unsigned, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<unsigned, size>::convert(value);
}
VirtualRegister m_dst;
unsigned m_functionIndex;
};
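// wasm_ref_func mirrors the WebAssembly `ref.func` instruction: it
// materializes a function reference for the function at functionIndex and
// stores it into dst. functionIndex is a plain immediate, so a module with
// many functions simply forces a wider encoding via the Fits<> checks.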
struct WasmGetGlobal : public Instruction {
static constexpr WasmOpcodeID opcodeID = wasm_get_global;
static constexpr size_t length = 3;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister dst, unsigned globalIndex)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, dst, globalIndex);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, unsigned globalIndex)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, dst, globalIndex);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister dst, unsigned globalIndex)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, dst, globalIndex))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, dst, globalIndex))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, dst, globalIndex);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& dst, unsigned& globalIndex)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<WasmOpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(dst)
&& Fits<unsigned, __size>::check(globalIndex)
&& (__size == OpcodeSize::Wide16 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister dst, unsigned globalIndex)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, dst, globalIndex)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide32));
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(dst));
gen->write(Fits<unsigned, __size>::convert(globalIndex));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**get_global"[2 - __sizeShiftAmount]);
dumper->dumpOperand("dst", m_dst, true);
dumper->dumpOperand("globalIndex", m_globalIndex, false);
}
WasmGetGlobal(const uint8_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_globalIndex(Fits<unsigned, OpcodeSize::Narrow>::convert(stream[1]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmGetGlobal(const uint16_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_globalIndex(Fits<unsigned, OpcodeSize::Wide16>::convert(stream[1]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmGetGlobal(const uint32_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_globalIndex(Fits<unsigned, OpcodeSize::Wide32>::convert(stream[1]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static WasmGetGlobal decode(const uint8_t* stream)
{
        // stream points at the wide prefix or the opcode byte; advance past it so the constructor receives a pointer to the first operand.
if (*stream == wasm_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == wasm_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide16>(value, func);
else
setDst<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setGlobalIndex(unsigned value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setGlobalIndex<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setGlobalIndex<OpcodeSize::Wide16>(value, func);
else
setGlobalIndex<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setGlobalIndex(unsigned value, Functor func)
{
if (!Fits<unsigned, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<unsigned, size>::convert(value);
}
VirtualRegister m_dst;
unsigned m_globalIndex;
};
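// wasm_get_global loads the global at globalIndex into dst; note that
// wasm_set_global below takes its operands in the opposite order (the index
// first, then the value register), matching the direction of the data flow.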
struct WasmSetGlobal : public Instruction {
static constexpr WasmOpcodeID opcodeID = wasm_set_global;
static constexpr size_t length = 3;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, unsigned globalIndex, VirtualRegister value)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, globalIndex, value);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, unsigned globalIndex, VirtualRegister value)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, globalIndex, value);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, unsigned globalIndex, VirtualRegister value)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, globalIndex, value))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, globalIndex, value))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, globalIndex, value);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, unsigned& globalIndex, VirtualRegister& value)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<WasmOpcodeID, __size>::check(opcodeID)
&& Fits<unsigned, __size>::check(globalIndex)
&& Fits<VirtualRegister, __size>::check(value)
&& (__size == OpcodeSize::Wide16 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, unsigned globalIndex, VirtualRegister value)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, globalIndex, value)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide32));
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<unsigned, __size>::convert(globalIndex));
gen->write(Fits<VirtualRegister, __size>::convert(value));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**set_global"[2 - __sizeShiftAmount]);
dumper->dumpOperand("globalIndex", m_globalIndex, true);
dumper->dumpOperand("value", m_value, false);
}
WasmSetGlobal(const uint8_t* stream)
: m_globalIndex(Fits<unsigned, OpcodeSize::Narrow>::convert(stream[0]))
, m_value(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[1]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmSetGlobal(const uint16_t* stream)
: m_globalIndex(Fits<unsigned, OpcodeSize::Wide16>::convert(stream[0]))
, m_value(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[1]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmSetGlobal(const uint32_t* stream)
: m_globalIndex(Fits<unsigned, OpcodeSize::Wide32>::convert(stream[0]))
, m_value(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[1]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static WasmSetGlobal decode(const uint8_t* stream)
{
        // stream points at the wide prefix or the opcode byte; advance past it so the constructor receives a pointer to the first operand.
if (*stream == wasm_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == wasm_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setGlobalIndex(unsigned value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setGlobalIndex<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setGlobalIndex<OpcodeSize::Wide16>(value, func);
else
setGlobalIndex<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setGlobalIndex(unsigned value, Functor func)
{
if (!Fits<unsigned, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<unsigned, size>::convert(value);
}
template<typename Functor>
void setValue(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setValue<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setValue<OpcodeSize::Wide16>(value, func);
else
setValue<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setValue(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
unsigned m_globalIndex;
VirtualRegister m_value;
};
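// Sketch of a global store (assuming `gen` as in the earlier examples):
//
//   WasmSetGlobal::emit(&gen, globalIndex, valueRegister); // index, then value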
struct WasmSetGlobalRef : public Instruction {
static constexpr WasmOpcodeID opcodeID = wasm_set_global_ref;
static constexpr size_t length = 3;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, unsigned globalIndex, VirtualRegister value)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, globalIndex, value);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, unsigned globalIndex, VirtualRegister value)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, globalIndex, value);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, unsigned globalIndex, VirtualRegister value)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, globalIndex, value))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, globalIndex, value))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, globalIndex, value);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, unsigned& globalIndex, VirtualRegister& value)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<WasmOpcodeID, __size>::check(opcodeID)
&& Fits<unsigned, __size>::check(globalIndex)
&& Fits<VirtualRegister, __size>::check(value)
&& (__size == OpcodeSize::Wide16 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, unsigned globalIndex, VirtualRegister value)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, globalIndex, value)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide32));
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<unsigned, __size>::convert(globalIndex));
gen->write(Fits<VirtualRegister, __size>::convert(value));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**set_global_ref"[2 - __sizeShiftAmount]);
dumper->dumpOperand("globalIndex", m_globalIndex, true);
dumper->dumpOperand("value", m_value, false);
}
WasmSetGlobalRef(const uint8_t* stream)
: m_globalIndex(Fits<unsigned, OpcodeSize::Narrow>::convert(stream[0]))
, m_value(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[1]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmSetGlobalRef(const uint16_t* stream)
: m_globalIndex(Fits<unsigned, OpcodeSize::Wide16>::convert(stream[0]))
, m_value(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[1]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmSetGlobalRef(const uint32_t* stream)
: m_globalIndex(Fits<unsigned, OpcodeSize::Wide32>::convert(stream[0]))
, m_value(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[1]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static WasmSetGlobalRef decode(const uint8_t* stream)
{
        // stream points at the wide prefix or the opcode byte; advance past it so the constructor receives a pointer to the first operand.
if (*stream == wasm_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == wasm_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setGlobalIndex(unsigned value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setGlobalIndex<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setGlobalIndex<OpcodeSize::Wide16>(value, func);
else
setGlobalIndex<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setGlobalIndex(unsigned value, Functor func)
{
if (!Fits<unsigned, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<unsigned, size>::convert(value);
}
template<typename Functor>
void setValue(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setValue<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setValue<OpcodeSize::Wide16>(value, func);
else
setValue<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setValue(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
unsigned m_globalIndex;
VirtualRegister m_value;
};
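// wasm_set_global_ref duplicates wasm_set_global's encoding under a distinct
// opcode; the split presumably exists so the interpreter can apply a GC write
// barrier when the stored value is a reference type, sparing the non-ref
// store path a per-instruction type check.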
struct WasmGetGlobalPortableBinding : public Instruction {
static constexpr WasmOpcodeID opcodeID = wasm_get_global_portable_binding;
static constexpr size_t length = 3;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister dst, unsigned globalIndex)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, dst, globalIndex);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, unsigned globalIndex)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, dst, globalIndex);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister dst, unsigned globalIndex)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, dst, globalIndex))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, dst, globalIndex))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, dst, globalIndex);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& dst, unsigned& globalIndex)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<WasmOpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(dst)
&& Fits<unsigned, __size>::check(globalIndex)
&& (__size == OpcodeSize::Wide16 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister dst, unsigned globalIndex)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, dst, globalIndex)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide32));
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(dst));
gen->write(Fits<unsigned, __size>::convert(globalIndex));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**get_global_portable_binding"[2 - __sizeShiftAmount]);
dumper->dumpOperand("dst", m_dst, true);
dumper->dumpOperand("globalIndex", m_globalIndex, false);
}
WasmGetGlobalPortableBinding(const uint8_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_globalIndex(Fits<unsigned, OpcodeSize::Narrow>::convert(stream[1]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmGetGlobalPortableBinding(const uint16_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_globalIndex(Fits<unsigned, OpcodeSize::Wide16>::convert(stream[1]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmGetGlobalPortableBinding(const uint32_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_globalIndex(Fits<unsigned, OpcodeSize::Wide32>::convert(stream[1]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static WasmGetGlobalPortableBinding decode(const uint8_t* stream)
{
        // The pointer passed to the constructor addresses the first operand (the opcode and any wide prefix are skipped).
if (*stream == wasm_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == wasm_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide16>(value, func);
else
setDst<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setGlobalIndex(unsigned value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setGlobalIndex<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setGlobalIndex<OpcodeSize::Wide16>(value, func);
else
setGlobalIndex<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setGlobalIndex(unsigned value, Functor func)
{
if (!Fits<unsigned, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<unsigned, size>::convert(value);
}
VirtualRegister m_dst;
unsigned m_globalIndex;
};
struct WasmSetGlobalPortableBinding : public Instruction {
static constexpr WasmOpcodeID opcodeID = wasm_set_global_portable_binding;
static constexpr size_t length = 3;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, unsigned globalIndex, VirtualRegister value)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, globalIndex, value);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, unsigned globalIndex, VirtualRegister value)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, globalIndex, value);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, unsigned globalIndex, VirtualRegister value)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, globalIndex, value))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, globalIndex, value))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, globalIndex, value);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, unsigned& globalIndex, VirtualRegister& value)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<WasmOpcodeID, __size>::check(opcodeID)
&& Fits<unsigned, __size>::check(globalIndex)
&& Fits<VirtualRegister, __size>::check(value)
&& (__size == OpcodeSize::Wide16 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, unsigned globalIndex, VirtualRegister value)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, globalIndex, value)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide32));
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<unsigned, __size>::convert(globalIndex));
gen->write(Fits<VirtualRegister, __size>::convert(value));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**set_global_portable_binding"[2 - __sizeShiftAmount]);
dumper->dumpOperand("globalIndex", m_globalIndex, true);
dumper->dumpOperand("value", m_value, false);
}
WasmSetGlobalPortableBinding(const uint8_t* stream)
: m_globalIndex(Fits<unsigned, OpcodeSize::Narrow>::convert(stream[0]))
, m_value(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[1]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmSetGlobalPortableBinding(const uint16_t* stream)
: m_globalIndex(Fits<unsigned, OpcodeSize::Wide16>::convert(stream[0]))
, m_value(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[1]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmSetGlobalPortableBinding(const uint32_t* stream)
: m_globalIndex(Fits<unsigned, OpcodeSize::Wide32>::convert(stream[0]))
, m_value(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[1]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static WasmSetGlobalPortableBinding decode(const uint8_t* stream)
{
        // The pointer passed to the constructor addresses the first operand (the opcode and any wide prefix are skipped).
if (*stream == wasm_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == wasm_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setGlobalIndex(unsigned value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setGlobalIndex<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setGlobalIndex<OpcodeSize::Wide16>(value, func);
else
setGlobalIndex<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setGlobalIndex(unsigned value, Functor func)
{
if (!Fits<unsigned, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<unsigned, size>::convert(value);
}
template<typename Functor>
void setValue(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setValue<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setValue<OpcodeSize::Wide16>(value, func);
else
setValue<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setValue(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
unsigned m_globalIndex;
VirtualRegister m_value;
};
struct WasmSetGlobalRefPortableBinding : public Instruction {
static constexpr WasmOpcodeID opcodeID = wasm_set_global_ref_portable_binding;
static constexpr size_t length = 3;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, unsigned globalIndex, VirtualRegister value)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, globalIndex, value);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, unsigned globalIndex, VirtualRegister value)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, globalIndex, value);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, unsigned globalIndex, VirtualRegister value)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, globalIndex, value))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, globalIndex, value))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, globalIndex, value);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, unsigned& globalIndex, VirtualRegister& value)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<WasmOpcodeID, __size>::check(opcodeID)
&& Fits<unsigned, __size>::check(globalIndex)
&& Fits<VirtualRegister, __size>::check(value)
&& (__size == OpcodeSize::Wide16 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, unsigned globalIndex, VirtualRegister value)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, globalIndex, value)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide32));
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<unsigned, __size>::convert(globalIndex));
gen->write(Fits<VirtualRegister, __size>::convert(value));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**set_global_ref_portable_binding"[2 - __sizeShiftAmount]);
dumper->dumpOperand("globalIndex", m_globalIndex, true);
dumper->dumpOperand("value", m_value, false);
}
WasmSetGlobalRefPortableBinding(const uint8_t* stream)
: m_globalIndex(Fits<unsigned, OpcodeSize::Narrow>::convert(stream[0]))
, m_value(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[1]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmSetGlobalRefPortableBinding(const uint16_t* stream)
: m_globalIndex(Fits<unsigned, OpcodeSize::Wide16>::convert(stream[0]))
, m_value(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[1]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmSetGlobalRefPortableBinding(const uint32_t* stream)
: m_globalIndex(Fits<unsigned, OpcodeSize::Wide32>::convert(stream[0]))
, m_value(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[1]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static WasmSetGlobalRefPortableBinding decode(const uint8_t* stream)
{
        // The pointer passed to the constructor addresses the first operand (the opcode and any wide prefix are skipped).
if (*stream == wasm_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == wasm_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setGlobalIndex(unsigned value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setGlobalIndex<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setGlobalIndex<OpcodeSize::Wide16>(value, func);
else
setGlobalIndex<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setGlobalIndex(unsigned value, Functor func)
{
if (!Fits<unsigned, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<unsigned, size>::convert(value);
}
template<typename Functor>
void setValue(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setValue<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setValue<OpcodeSize::Wide16>(value, func);
else
setValue<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setValue(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
unsigned m_globalIndex;
VirtualRegister m_value;
};
struct WasmTableGet : public Instruction {
static constexpr WasmOpcodeID opcodeID = wasm_table_get;
static constexpr size_t length = 4;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister index, unsigned tableIndex)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, dst, index, tableIndex);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister index, unsigned tableIndex)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, dst, index, tableIndex);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister index, unsigned tableIndex)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, dst, index, tableIndex))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, dst, index, tableIndex))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, dst, index, tableIndex);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& dst, VirtualRegister& index, unsigned& tableIndex)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<WasmOpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(dst)
&& Fits<VirtualRegister, __size>::check(index)
&& Fits<unsigned, __size>::check(tableIndex)
&& (__size == OpcodeSize::Wide16 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister index, unsigned tableIndex)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, dst, index, tableIndex)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide32));
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(dst));
gen->write(Fits<VirtualRegister, __size>::convert(index));
gen->write(Fits<unsigned, __size>::convert(tableIndex));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**table_get"[2 - __sizeShiftAmount]);
dumper->dumpOperand("dst", m_dst, true);
dumper->dumpOperand("index", m_index, false);
dumper->dumpOperand("tableIndex", m_tableIndex, false);
}
WasmTableGet(const uint8_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_index(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[1]))
, m_tableIndex(Fits<unsigned, OpcodeSize::Narrow>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmTableGet(const uint16_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_index(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[1]))
, m_tableIndex(Fits<unsigned, OpcodeSize::Wide16>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmTableGet(const uint32_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_index(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[1]))
, m_tableIndex(Fits<unsigned, OpcodeSize::Wide32>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static WasmTableGet decode(const uint8_t* stream)
{
        // The pointer passed to the constructor addresses the first operand (the opcode and any wide prefix are skipped).
if (*stream == wasm_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == wasm_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide16>(value, func);
else
setDst<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setIndex(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setIndex<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setIndex<OpcodeSize::Wide16>(value, func);
else
setIndex<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setIndex(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setTableIndex(unsigned value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setTableIndex<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setTableIndex<OpcodeSize::Wide16>(value, func);
else
setTableIndex<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setTableIndex(unsigned value, Functor func)
{
if (!Fits<unsigned, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 2 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<unsigned, size>::convert(value);
}
VirtualRegister m_dst;
VirtualRegister m_index;
unsigned m_tableIndex;
};
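// Usage sketch (hand-written, not generated; `pc` is a hypothetical pointer
// into the bytecode stream, addressing the prefix/opcode byte of a
// wasm_table_get encoding). decode() dispatches on that leading byte: a
// wasm_wide16 or wasm_wide32 prefix selects the 16- or 32-bit operand view,
// otherwise the narrow 8-bit view is used.
//
//     auto op = WasmTableGet::decode(pc);
//     VirtualRegister dst = op.m_dst;        // destination register
//     unsigned tableIndex = op.m_tableIndex; // which table is read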
struct WasmTableSet : public Instruction {
static constexpr WasmOpcodeID opcodeID = wasm_table_set;
static constexpr size_t length = 4;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister index, VirtualRegister value, unsigned tableIndex)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, index, value, tableIndex);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister index, VirtualRegister value, unsigned tableIndex)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, index, value, tableIndex);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister index, VirtualRegister value, unsigned tableIndex)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, index, value, tableIndex))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, index, value, tableIndex))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, index, value, tableIndex);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& index, VirtualRegister& value, unsigned& tableIndex)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<WasmOpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(index)
&& Fits<VirtualRegister, __size>::check(value)
&& Fits<unsigned, __size>::check(tableIndex)
&& (__size == OpcodeSize::Wide16 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister index, VirtualRegister value, unsigned tableIndex)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, index, value, tableIndex)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide32));
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(index));
gen->write(Fits<VirtualRegister, __size>::convert(value));
gen->write(Fits<unsigned, __size>::convert(tableIndex));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**table_set"[2 - __sizeShiftAmount]);
dumper->dumpOperand("index", m_index, true);
dumper->dumpOperand("value", m_value, false);
dumper->dumpOperand("tableIndex", m_tableIndex, false);
}
WasmTableSet(const uint8_t* stream)
: m_index(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_value(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[1]))
, m_tableIndex(Fits<unsigned, OpcodeSize::Narrow>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmTableSet(const uint16_t* stream)
: m_index(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_value(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[1]))
, m_tableIndex(Fits<unsigned, OpcodeSize::Wide16>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmTableSet(const uint32_t* stream)
: m_index(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_value(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[1]))
, m_tableIndex(Fits<unsigned, OpcodeSize::Wide32>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static WasmTableSet decode(const uint8_t* stream)
{
        // The pointer passed to the constructor addresses the first operand (the opcode and any wide prefix are skipped).
if (*stream == wasm_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == wasm_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setIndex(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setIndex<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setIndex<OpcodeSize::Wide16>(value, func);
else
setIndex<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setIndex(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setValue(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setValue<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setValue<OpcodeSize::Wide16>(value, func);
else
setValue<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setValue(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setTableIndex(unsigned value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setTableIndex<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setTableIndex<OpcodeSize::Wide16>(value, func);
else
setTableIndex<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setTableIndex(unsigned value, Functor func)
{
if (!Fits<unsigned, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 2 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<unsigned, size>::convert(value);
}
VirtualRegister m_index;
VirtualRegister m_value;
unsigned m_tableIndex;
};
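// Usage sketch (hand-written, not generated; `insn` and the lambda are
// hypothetical). The operand setters rewrite an already-emitted instruction
// in place at its current width: if the new value does not fit that width,
// the supplied functor provides a fallback value (assumed by the caller to
// fit) instead of re-widening the encoding.
//
//     insn->setValue(newValue, [&]() -> VirtualRegister {
//         return fallbackRegister; // assumed to fit the existing encoding
//     });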
struct WasmTableInit : public Instruction {
static constexpr WasmOpcodeID opcodeID = wasm_table_init;
static constexpr size_t length = 6;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister dstOffset, VirtualRegister srcOffset, VirtualRegister length, unsigned elementIndex, unsigned tableIndex)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, dstOffset, srcOffset, length, elementIndex, tableIndex);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister dstOffset, VirtualRegister srcOffset, VirtualRegister length, unsigned elementIndex, unsigned tableIndex)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, dstOffset, srcOffset, length, elementIndex, tableIndex);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister dstOffset, VirtualRegister srcOffset, VirtualRegister length, unsigned elementIndex, unsigned tableIndex)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, dstOffset, srcOffset, length, elementIndex, tableIndex))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, dstOffset, srcOffset, length, elementIndex, tableIndex))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, dstOffset, srcOffset, length, elementIndex, tableIndex);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& dstOffset, VirtualRegister& srcOffset, VirtualRegister& length, unsigned& elementIndex, unsigned& tableIndex)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<WasmOpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(dstOffset)
&& Fits<VirtualRegister, __size>::check(srcOffset)
&& Fits<VirtualRegister, __size>::check(length)
&& Fits<unsigned, __size>::check(elementIndex)
&& Fits<unsigned, __size>::check(tableIndex)
&& (__size == OpcodeSize::Wide16 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister dstOffset, VirtualRegister srcOffset, VirtualRegister length, unsigned elementIndex, unsigned tableIndex)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, dstOffset, srcOffset, length, elementIndex, tableIndex)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide32));
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(dstOffset));
gen->write(Fits<VirtualRegister, __size>::convert(srcOffset));
gen->write(Fits<VirtualRegister, __size>::convert(length));
gen->write(Fits<unsigned, __size>::convert(elementIndex));
gen->write(Fits<unsigned, __size>::convert(tableIndex));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**table_init"[2 - __sizeShiftAmount]);
dumper->dumpOperand("dstOffset", m_dstOffset, true);
dumper->dumpOperand("srcOffset", m_srcOffset, false);
dumper->dumpOperand("length", m_length, false);
dumper->dumpOperand("elementIndex", m_elementIndex, false);
dumper->dumpOperand("tableIndex", m_tableIndex, false);
}
WasmTableInit(const uint8_t* stream)
: m_dstOffset(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_srcOffset(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[1]))
, m_length(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[2]))
, m_elementIndex(Fits<unsigned, OpcodeSize::Narrow>::convert(stream[3]))
, m_tableIndex(Fits<unsigned, OpcodeSize::Narrow>::convert(stream[4]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmTableInit(const uint16_t* stream)
: m_dstOffset(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_srcOffset(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[1]))
, m_length(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[2]))
, m_elementIndex(Fits<unsigned, OpcodeSize::Wide16>::convert(stream[3]))
, m_tableIndex(Fits<unsigned, OpcodeSize::Wide16>::convert(stream[4]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmTableInit(const uint32_t* stream)
: m_dstOffset(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_srcOffset(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[1]))
, m_length(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[2]))
, m_elementIndex(Fits<unsigned, OpcodeSize::Wide32>::convert(stream[3]))
, m_tableIndex(Fits<unsigned, OpcodeSize::Wide32>::convert(stream[4]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static WasmTableInit decode(const uint8_t* stream)
{
        // The pointer passed to the constructor addresses the first operand (the opcode and any wide prefix are skipped).
if (*stream == wasm_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == wasm_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setDstOffset(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setDstOffset<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setDstOffset<OpcodeSize::Wide16>(value, func);
else
setDstOffset<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setDstOffset(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setSrcOffset(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setSrcOffset<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setSrcOffset<OpcodeSize::Wide16>(value, func);
else
setSrcOffset<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setSrcOffset(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setLength(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setLength<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setLength<OpcodeSize::Wide16>(value, func);
else
setLength<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setLength(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 2 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setElementIndex(unsigned value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setElementIndex<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setElementIndex<OpcodeSize::Wide16>(value, func);
else
setElementIndex<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setElementIndex(unsigned value, Functor func)
{
if (!Fits<unsigned, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 3 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<unsigned, size>::convert(value);
}
template<typename Functor>
void setTableIndex(unsigned value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setTableIndex<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setTableIndex<OpcodeSize::Wide16>(value, func);
else
setTableIndex<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setTableIndex(unsigned value, Functor func)
{
if (!Fits<unsigned, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 4 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<unsigned, size>::convert(value);
}
VirtualRegister m_dstOffset;
VirtualRegister m_srcOffset;
VirtualRegister m_length;
unsigned m_elementIndex;
unsigned m_tableIndex;
};
struct WasmElemDrop : public Instruction {
static constexpr WasmOpcodeID opcodeID = wasm_elem_drop;
static constexpr size_t length = 2;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, unsigned elementIndex)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, elementIndex);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, unsigned elementIndex)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, elementIndex);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, unsigned elementIndex)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, elementIndex))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, elementIndex))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, elementIndex);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, unsigned& elementIndex)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<WasmOpcodeID, __size>::check(opcodeID)
&& Fits<unsigned, __size>::check(elementIndex)
&& (__size == OpcodeSize::Wide16 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, unsigned elementIndex)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, elementIndex)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide32));
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<unsigned, __size>::convert(elementIndex));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**elem_drop"[2 - __sizeShiftAmount]);
dumper->dumpOperand("elementIndex", m_elementIndex, true);
}
WasmElemDrop(const uint8_t* stream)
: m_elementIndex(Fits<unsigned, OpcodeSize::Narrow>::convert(stream[0]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmElemDrop(const uint16_t* stream)
: m_elementIndex(Fits<unsigned, OpcodeSize::Wide16>::convert(stream[0]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmElemDrop(const uint32_t* stream)
: m_elementIndex(Fits<unsigned, OpcodeSize::Wide32>::convert(stream[0]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static WasmElemDrop decode(const uint8_t* stream)
{
        // The pointer passed to the constructor addresses the first operand (the opcode and any wide prefix are skipped).
if (*stream == wasm_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == wasm_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setElementIndex(unsigned value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setElementIndex<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setElementIndex<OpcodeSize::Wide16>(value, func);
else
setElementIndex<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setElementIndex(unsigned value, Functor func)
{
if (!Fits<unsigned, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<unsigned, size>::convert(value);
}
unsigned m_elementIndex;
};
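// Usage sketch (hand-written, not generated; `generator` and `elementIndex`
// are hypothetical). The public emit() delegates to
// emitWithSmallestSizeRequirement(), which tries the narrow encoding first,
// then wide16, then wide32, asserting that the widest form always fits.
//
//     WasmElemDrop::emit(&generator, elementIndex); // narrowest encoding that fits
//     WasmElemDrop::emit<OpcodeSize::Wide32>(&generator, elementIndex); // force wide32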
struct WasmTableSize : public Instruction {
static constexpr WasmOpcodeID opcodeID = wasm_table_size;
static constexpr size_t length = 3;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister dst, unsigned tableIndex)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, dst, tableIndex);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, unsigned tableIndex)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, dst, tableIndex);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister dst, unsigned tableIndex)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, dst, tableIndex))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, dst, tableIndex))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, dst, tableIndex);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& dst, unsigned& tableIndex)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<WasmOpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(dst)
&& Fits<unsigned, __size>::check(tableIndex)
&& (__size == OpcodeSize::Wide16 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister dst, unsigned tableIndex)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, dst, tableIndex)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide32));
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(dst));
gen->write(Fits<unsigned, __size>::convert(tableIndex));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**table_size"[2 - __sizeShiftAmount]);
dumper->dumpOperand("dst", m_dst, true);
dumper->dumpOperand("tableIndex", m_tableIndex, false);
}
WasmTableSize(const uint8_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_tableIndex(Fits<unsigned, OpcodeSize::Narrow>::convert(stream[1]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmTableSize(const uint16_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_tableIndex(Fits<unsigned, OpcodeSize::Wide16>::convert(stream[1]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmTableSize(const uint32_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_tableIndex(Fits<unsigned, OpcodeSize::Wide32>::convert(stream[1]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static WasmTableSize decode(const uint8_t* stream)
{
        // The pointer passed to the constructor addresses the first operand (the opcode and any wide prefix are skipped).
if (*stream == wasm_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == wasm_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide16>(value, func);
else
setDst<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setTableIndex(unsigned value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setTableIndex<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setTableIndex<OpcodeSize::Wide16>(value, func);
else
setTableIndex<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setTableIndex(unsigned value, Functor func)
{
if (!Fits<unsigned, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<unsigned, size>::convert(value);
}
VirtualRegister m_dst;
unsigned m_tableIndex;
};
struct WasmTableGrow : public Instruction {
static constexpr WasmOpcodeID opcodeID = wasm_table_grow;
static constexpr size_t length = 5;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister fill, VirtualRegister size, unsigned tableIndex)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, dst, fill, size, tableIndex);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister fill, VirtualRegister size, unsigned tableIndex)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, dst, fill, size, tableIndex);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister fill, VirtualRegister size, unsigned tableIndex)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, dst, fill, size, tableIndex))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, dst, fill, size, tableIndex))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, dst, fill, size, tableIndex);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& dst, VirtualRegister& fill, VirtualRegister& size, unsigned& tableIndex)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<WasmOpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(dst)
&& Fits<VirtualRegister, __size>::check(fill)
&& Fits<VirtualRegister, __size>::check(size)
&& Fits<unsigned, __size>::check(tableIndex)
&& (__size == OpcodeSize::Wide16 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister fill, VirtualRegister size, unsigned tableIndex)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, dst, fill, size, tableIndex)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide32));
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(dst));
gen->write(Fits<VirtualRegister, __size>::convert(fill));
gen->write(Fits<VirtualRegister, __size>::convert(size));
gen->write(Fits<unsigned, __size>::convert(tableIndex));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**table_grow"[2 - __sizeShiftAmount]);
dumper->dumpOperand("dst", m_dst, true);
dumper->dumpOperand("fill", m_fill, false);
dumper->dumpOperand("size", m_size, false);
dumper->dumpOperand("tableIndex", m_tableIndex, false);
}
WasmTableGrow(const uint8_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_fill(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[1]))
, m_size(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[2]))
, m_tableIndex(Fits<unsigned, OpcodeSize::Narrow>::convert(stream[3]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmTableGrow(const uint16_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_fill(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[1]))
, m_size(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[2]))
, m_tableIndex(Fits<unsigned, OpcodeSize::Wide16>::convert(stream[3]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmTableGrow(const uint32_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_fill(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[1]))
, m_size(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[2]))
, m_tableIndex(Fits<unsigned, OpcodeSize::Wide32>::convert(stream[3]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static WasmTableGrow decode(const uint8_t* stream)
{
        // The pointer passed to the constructor addresses the first operand (the opcode and any wide prefix are skipped).
if (*stream == wasm_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == wasm_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide16>(value, func);
else
setDst<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setFill(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setFill<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setFill<OpcodeSize::Wide16>(value, func);
else
setFill<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setFill(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setSize(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setSize<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setSize<OpcodeSize::Wide16>(value, func);
else
setSize<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setSize(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 2 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setTableIndex(unsigned value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setTableIndex<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setTableIndex<OpcodeSize::Wide16>(value, func);
else
setTableIndex<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setTableIndex(unsigned value, Functor func)
{
if (!Fits<unsigned, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 3 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<unsigned, size>::convert(value);
}
VirtualRegister m_dst;
VirtualRegister m_fill;
VirtualRegister m_size;
unsigned m_tableIndex;
};
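// The structs below all share one layout: a one-byte opcode (preceded by a
// wasm_wide16 or wasm_wide32 prefix byte in the wide forms) followed by length - 1
// operands, each stored at a stride of 1, 2, or 4 bytes according to the opcode size.
// The paired setX overloads rewrite an operand in place, calling back into func() for
// a replacement when the new value no longer fits the encoded width. A minimal decode
// sketch, assuming pc is a hypothetical pointer positioned at the prefix-or-opcode
// byte of an emitted table_fill instruction:
//
//     auto insn = WasmTableFill::decode(pc);
//     // insn.m_offset, insn.m_fill, insn.m_size and insn.m_tableIndex now hold the
//     // decoded operands, whichever width they were encoded at.
//
// WasmTableFill itself encodes table.fill: write the value in fill into size
// consecutive slots of table tableIndex, starting at slot offset.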
struct WasmTableFill : public Instruction {
static constexpr WasmOpcodeID opcodeID = wasm_table_fill;
static constexpr size_t length = 5;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister offset, VirtualRegister fill, VirtualRegister size, unsigned tableIndex)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, offset, fill, size, tableIndex);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister offset, VirtualRegister fill, VirtualRegister size, unsigned tableIndex)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, offset, fill, size, tableIndex);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister offset, VirtualRegister fill, VirtualRegister size, unsigned tableIndex)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, offset, fill, size, tableIndex))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, offset, fill, size, tableIndex))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, offset, fill, size, tableIndex);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& offset, VirtualRegister& fill, VirtualRegister& size, unsigned& tableIndex)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Wide16 is disabled for the Windows CLoop for now; re-enable once this is fixed:
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<WasmOpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(offset)
&& Fits<VirtualRegister, __size>::check(fill)
&& Fits<VirtualRegister, __size>::check(size)
&& Fits<unsigned, __size>::check(tableIndex)
&& (__size == OpcodeSize::Wide16 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister offset, VirtualRegister fill, VirtualRegister size, unsigned tableIndex)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, offset, fill, size, tableIndex)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide32));
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(offset));
gen->write(Fits<VirtualRegister, __size>::convert(fill));
gen->write(Fits<VirtualRegister, __size>::convert(size));
gen->write(Fits<unsigned, __size>::convert(tableIndex));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**table_fill"[2 - __sizeShiftAmount]);
dumper->dumpOperand("offset", m_offset, true);
dumper->dumpOperand("fill", m_fill, false);
dumper->dumpOperand("size", m_size, false);
dumper->dumpOperand("tableIndex", m_tableIndex, false);
}
WasmTableFill(const uint8_t* stream)
: m_offset(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_fill(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[1]))
, m_size(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[2]))
, m_tableIndex(Fits<unsigned, OpcodeSize::Narrow>::convert(stream[3]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmTableFill(const uint16_t* stream)
: m_offset(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_fill(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[1]))
, m_size(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[2]))
, m_tableIndex(Fits<unsigned, OpcodeSize::Wide16>::convert(stream[3]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmTableFill(const uint32_t* stream)
: m_offset(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_fill(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[1]))
, m_size(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[2]))
, m_tableIndex(Fits<unsigned, OpcodeSize::Wide32>::convert(stream[3]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static WasmTableFill decode(const uint8_t* stream)
{
// The constructors expect a pointer to the first operand, so skip the opcode byte and, when present, the wide prefix.
if (*stream == wasm_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == wasm_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setOffset(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setOffset<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setOffset<OpcodeSize::Wide16>(value, func);
else
setOffset<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setOffset(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setFill(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setFill<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setFill<OpcodeSize::Wide16>(value, func);
else
setFill<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setFill(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setSize(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setSize<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setSize<OpcodeSize::Wide16>(value, func);
else
setSize<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setSize(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 2 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setTableIndex(unsigned value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setTableIndex<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setTableIndex<OpcodeSize::Wide16>(value, func);
else
setTableIndex<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setTableIndex(unsigned value, Functor func)
{
if (!Fits<unsigned, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 3 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<unsigned, size>::convert(value);
}
VirtualRegister m_offset;
VirtualRegister m_fill;
VirtualRegister m_size;
unsigned m_tableIndex;
};
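// WasmTableCopy encodes table.copy: copy length elements from table srcTableIndex,
// starting at srcOffset, into table dstTableIndex, starting at dstOffset. Its five
// operands plus the opcode give it length 6.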
struct WasmTableCopy : public Instruction {
static constexpr WasmOpcodeID opcodeID = wasm_table_copy;
static constexpr size_t length = 6;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister dstOffset, VirtualRegister srcOffset, VirtualRegister length, unsigned dstTableIndex, unsigned srcTableIndex)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, dstOffset, srcOffset, length, dstTableIndex, srcTableIndex);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister dstOffset, VirtualRegister srcOffset, VirtualRegister length, unsigned dstTableIndex, unsigned srcTableIndex)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, dstOffset, srcOffset, length, dstTableIndex, srcTableIndex);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister dstOffset, VirtualRegister srcOffset, VirtualRegister length, unsigned dstTableIndex, unsigned srcTableIndex)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, dstOffset, srcOffset, length, dstTableIndex, srcTableIndex))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, dstOffset, srcOffset, length, dstTableIndex, srcTableIndex))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, dstOffset, srcOffset, length, dstTableIndex, srcTableIndex);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& dstOffset, VirtualRegister& srcOffset, VirtualRegister& length, unsigned& dstTableIndex, unsigned& srcTableIndex)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Wide16 is disabled for the Windows CLoop for now; re-enable once this is fixed:
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<WasmOpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(dstOffset)
&& Fits<VirtualRegister, __size>::check(srcOffset)
&& Fits<VirtualRegister, __size>::check(length)
&& Fits<unsigned, __size>::check(dstTableIndex)
&& Fits<unsigned, __size>::check(srcTableIndex)
&& (__size == OpcodeSize::Wide16 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister dstOffset, VirtualRegister srcOffset, VirtualRegister length, unsigned dstTableIndex, unsigned srcTableIndex)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, dstOffset, srcOffset, length, dstTableIndex, srcTableIndex)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide32));
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(dstOffset));
gen->write(Fits<VirtualRegister, __size>::convert(srcOffset));
gen->write(Fits<VirtualRegister, __size>::convert(length));
gen->write(Fits<unsigned, __size>::convert(dstTableIndex));
gen->write(Fits<unsigned, __size>::convert(srcTableIndex));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**table_copy"[2 - __sizeShiftAmount]);
dumper->dumpOperand("dstOffset", m_dstOffset, true);
dumper->dumpOperand("srcOffset", m_srcOffset, false);
dumper->dumpOperand("length", m_length, false);
dumper->dumpOperand("dstTableIndex", m_dstTableIndex, false);
dumper->dumpOperand("srcTableIndex", m_srcTableIndex, false);
}
WasmTableCopy(const uint8_t* stream)
: m_dstOffset(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_srcOffset(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[1]))
, m_length(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[2]))
, m_dstTableIndex(Fits<unsigned, OpcodeSize::Narrow>::convert(stream[3]))
, m_srcTableIndex(Fits<unsigned, OpcodeSize::Narrow>::convert(stream[4]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmTableCopy(const uint16_t* stream)
: m_dstOffset(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_srcOffset(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[1]))
, m_length(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[2]))
, m_dstTableIndex(Fits<unsigned, OpcodeSize::Wide16>::convert(stream[3]))
, m_srcTableIndex(Fits<unsigned, OpcodeSize::Wide16>::convert(stream[4]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmTableCopy(const uint32_t* stream)
: m_dstOffset(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_srcOffset(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[1]))
, m_length(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[2]))
, m_dstTableIndex(Fits<unsigned, OpcodeSize::Wide32>::convert(stream[3]))
, m_srcTableIndex(Fits<unsigned, OpcodeSize::Wide32>::convert(stream[4]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static WasmTableCopy decode(const uint8_t* stream)
{
// The constructors expect a pointer to the first operand, so skip the opcode byte and, when present, the wide prefix.
if (*stream == wasm_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == wasm_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setDstOffset(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setDstOffset<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setDstOffset<OpcodeSize::Wide16>(value, func);
else
setDstOffset<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setDstOffset(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setSrcOffset(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setSrcOffset<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setSrcOffset<OpcodeSize::Wide16>(value, func);
else
setSrcOffset<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setSrcOffset(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setLength(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setLength<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setLength<OpcodeSize::Wide16>(value, func);
else
setLength<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setLength(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 2 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setDstTableIndex(unsigned value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setDstTableIndex<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setDstTableIndex<OpcodeSize::Wide16>(value, func);
else
setDstTableIndex<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setDstTableIndex(unsigned value, Functor func)
{
if (!Fits<unsigned, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 3 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<unsigned, size>::convert(value);
}
template<typename Functor>
void setSrcTableIndex(unsigned value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setSrcTableIndex<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setSrcTableIndex<OpcodeSize::Wide16>(value, func);
else
setSrcTableIndex<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setSrcTableIndex(unsigned value, Functor func)
{
if (!Fits<unsigned, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 4 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<unsigned, size>::convert(value);
}
VirtualRegister m_dstOffset;
VirtualRegister m_srcOffset;
VirtualRegister m_length;
unsigned m_dstTableIndex;
unsigned m_srcTableIndex;
};
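// WasmCall encodes a direct call: functionIndex statically selects the callee, and
// stackOffset and numberOfStackArgs, as the names suggest, describe the outgoing-
// argument area of the caller's frame. Emission always starts from the smallest
// encoding; a sketch, assuming gen is the generator building this instruction stream:
//
//     WasmCall::emit(gen, functionIndex, stackOffset, numberOfStackArgs);
//     // Tries Narrow first, then Wide16, then Wide32; the wide forms are written
//     // behind a one-byte wasm_wide16/wasm_wide32 prefix after suitable alignment.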
struct WasmCall : public Instruction {
static constexpr WasmOpcodeID opcodeID = wasm_call;
static constexpr size_t length = 4;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, unsigned functionIndex, unsigned stackOffset, unsigned numberOfStackArgs)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, functionIndex, stackOffset, numberOfStackArgs);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, unsigned functionIndex, unsigned stackOffset, unsigned numberOfStackArgs)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, functionIndex, stackOffset, numberOfStackArgs);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, unsigned functionIndex, unsigned stackOffset, unsigned numberOfStackArgs)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, functionIndex, stackOffset, numberOfStackArgs))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, functionIndex, stackOffset, numberOfStackArgs))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, functionIndex, stackOffset, numberOfStackArgs);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, unsigned& functionIndex, unsigned& stackOffset, unsigned& numberOfStackArgs)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Wide16 is disabled for the Windows CLoop for now; re-enable once this is fixed:
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<WasmOpcodeID, __size>::check(opcodeID)
&& Fits<unsigned, __size>::check(functionIndex)
&& Fits<unsigned, __size>::check(stackOffset)
&& Fits<unsigned, __size>::check(numberOfStackArgs)
&& (__size == OpcodeSize::Wide16 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, unsigned functionIndex, unsigned stackOffset, unsigned numberOfStackArgs)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, functionIndex, stackOffset, numberOfStackArgs)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide32));
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<unsigned, __size>::convert(functionIndex));
gen->write(Fits<unsigned, __size>::convert(stackOffset));
gen->write(Fits<unsigned, __size>::convert(numberOfStackArgs));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**call"[2 - __sizeShiftAmount]);
dumper->dumpOperand("functionIndex", m_functionIndex, true);
dumper->dumpOperand("stackOffset", m_stackOffset, false);
dumper->dumpOperand("numberOfStackArgs", m_numberOfStackArgs, false);
}
WasmCall(const uint8_t* stream)
: m_functionIndex(Fits<unsigned, OpcodeSize::Narrow>::convert(stream[0]))
, m_stackOffset(Fits<unsigned, OpcodeSize::Narrow>::convert(stream[1]))
, m_numberOfStackArgs(Fits<unsigned, OpcodeSize::Narrow>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmCall(const uint16_t* stream)
: m_functionIndex(Fits<unsigned, OpcodeSize::Wide16>::convert(stream[0]))
, m_stackOffset(Fits<unsigned, OpcodeSize::Wide16>::convert(stream[1]))
, m_numberOfStackArgs(Fits<unsigned, OpcodeSize::Wide16>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmCall(const uint32_t* stream)
: m_functionIndex(Fits<unsigned, OpcodeSize::Wide32>::convert(stream[0]))
, m_stackOffset(Fits<unsigned, OpcodeSize::Wide32>::convert(stream[1]))
, m_numberOfStackArgs(Fits<unsigned, OpcodeSize::Wide32>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static WasmCall decode(const uint8_t* stream)
{
// The constructors expect a pointer to the first operand, so skip the opcode byte and, when present, the wide prefix.
if (*stream == wasm_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == wasm_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setFunctionIndex(unsigned value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setFunctionIndex<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setFunctionIndex<OpcodeSize::Wide16>(value, func);
else
setFunctionIndex<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setFunctionIndex(unsigned value, Functor func)
{
if (!Fits<unsigned, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<unsigned, size>::convert(value);
}
template<typename Functor>
void setStackOffset(unsigned value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setStackOffset<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setStackOffset<OpcodeSize::Wide16>(value, func);
else
setStackOffset<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setStackOffset(unsigned value, Functor func)
{
if (!Fits<unsigned, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<unsigned, size>::convert(value);
}
template<typename Functor>
void setNumberOfStackArgs(unsigned value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setNumberOfStackArgs<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setNumberOfStackArgs<OpcodeSize::Wide16>(value, func);
else
setNumberOfStackArgs<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setNumberOfStackArgs(unsigned value, Functor func)
{
if (!Fits<unsigned, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 2 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<unsigned, size>::convert(value);
}
unsigned m_functionIndex;
unsigned m_stackOffset;
unsigned m_numberOfStackArgs;
};
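// WasmCallNoTls is identical to WasmCall except for its opcode. As the name suggests,
// it is the variant used when the callee's instance can be reached without the
// thread-local-storage path; operand layout, emit, and decode are otherwise the same.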
struct WasmCallNoTls : public Instruction {
static constexpr WasmOpcodeID opcodeID = wasm_call_no_tls;
static constexpr size_t length = 4;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, unsigned functionIndex, unsigned stackOffset, unsigned numberOfStackArgs)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, functionIndex, stackOffset, numberOfStackArgs);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, unsigned functionIndex, unsigned stackOffset, unsigned numberOfStackArgs)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, functionIndex, stackOffset, numberOfStackArgs);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, unsigned functionIndex, unsigned stackOffset, unsigned numberOfStackArgs)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, functionIndex, stackOffset, numberOfStackArgs))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, functionIndex, stackOffset, numberOfStackArgs))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, functionIndex, stackOffset, numberOfStackArgs);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, unsigned& functionIndex, unsigned& stackOffset, unsigned& numberOfStackArgs)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Wide16 is disabled for the Windows CLoop for now; re-enable once this is fixed:
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<WasmOpcodeID, __size>::check(opcodeID)
&& Fits<unsigned, __size>::check(functionIndex)
&& Fits<unsigned, __size>::check(stackOffset)
&& Fits<unsigned, __size>::check(numberOfStackArgs)
&& (__size == OpcodeSize::Wide16 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, unsigned functionIndex, unsigned stackOffset, unsigned numberOfStackArgs)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, functionIndex, stackOffset, numberOfStackArgs)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide32));
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<unsigned, __size>::convert(functionIndex));
gen->write(Fits<unsigned, __size>::convert(stackOffset));
gen->write(Fits<unsigned, __size>::convert(numberOfStackArgs));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**call_no_tls"[2 - __sizeShiftAmount]);
dumper->dumpOperand("functionIndex", m_functionIndex, true);
dumper->dumpOperand("stackOffset", m_stackOffset, false);
dumper->dumpOperand("numberOfStackArgs", m_numberOfStackArgs, false);
}
WasmCallNoTls(const uint8_t* stream)
: m_functionIndex(Fits<unsigned, OpcodeSize::Narrow>::convert(stream[0]))
, m_stackOffset(Fits<unsigned, OpcodeSize::Narrow>::convert(stream[1]))
, m_numberOfStackArgs(Fits<unsigned, OpcodeSize::Narrow>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmCallNoTls(const uint16_t* stream)
: m_functionIndex(Fits<unsigned, OpcodeSize::Wide16>::convert(stream[0]))
, m_stackOffset(Fits<unsigned, OpcodeSize::Wide16>::convert(stream[1]))
, m_numberOfStackArgs(Fits<unsigned, OpcodeSize::Wide16>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmCallNoTls(const uint32_t* stream)
: m_functionIndex(Fits<unsigned, OpcodeSize::Wide32>::convert(stream[0]))
, m_stackOffset(Fits<unsigned, OpcodeSize::Wide32>::convert(stream[1]))
, m_numberOfStackArgs(Fits<unsigned, OpcodeSize::Wide32>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static WasmCallNoTls decode(const uint8_t* stream)
{
// The constructors expect a pointer to the first operand, so skip the opcode byte and, when present, the wide prefix.
if (*stream == wasm_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == wasm_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setFunctionIndex(unsigned value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setFunctionIndex<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setFunctionIndex<OpcodeSize::Wide16>(value, func);
else
setFunctionIndex<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setFunctionIndex(unsigned value, Functor func)
{
if (!Fits<unsigned, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<unsigned, size>::convert(value);
}
template<typename Functor>
void setStackOffset(unsigned value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setStackOffset<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setStackOffset<OpcodeSize::Wide16>(value, func);
else
setStackOffset<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setStackOffset(unsigned value, Functor func)
{
if (!Fits<unsigned, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<unsigned, size>::convert(value);
}
template<typename Functor>
void setNumberOfStackArgs(unsigned value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setNumberOfStackArgs<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setNumberOfStackArgs<OpcodeSize::Wide16>(value, func);
else
setNumberOfStackArgs<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setNumberOfStackArgs(unsigned value, Functor func)
{
if (!Fits<unsigned, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 2 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<unsigned, size>::convert(value);
}
unsigned m_functionIndex;
unsigned m_stackOffset;
unsigned m_numberOfStackArgs;
};
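// WasmCallIndirect encodes call_indirect: functionIndex is a virtual register holding
// the runtime index into table tableIndex, and signatureIndex names the signature the
// callee must match for the type check. stackOffset and numberOfStackArgs play the
// same role as in WasmCall.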
struct WasmCallIndirect : public Instruction {
static constexpr WasmOpcodeID opcodeID = wasm_call_indirect;
static constexpr size_t length = 6;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister functionIndex, unsigned signatureIndex, unsigned stackOffset, unsigned numberOfStackArgs, unsigned tableIndex)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, functionIndex, signatureIndex, stackOffset, numberOfStackArgs, tableIndex);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister functionIndex, unsigned signatureIndex, unsigned stackOffset, unsigned numberOfStackArgs, unsigned tableIndex)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, functionIndex, signatureIndex, stackOffset, numberOfStackArgs, tableIndex);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister functionIndex, unsigned signatureIndex, unsigned stackOffset, unsigned numberOfStackArgs, unsigned tableIndex)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, functionIndex, signatureIndex, stackOffset, numberOfStackArgs, tableIndex))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, functionIndex, signatureIndex, stackOffset, numberOfStackArgs, tableIndex))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, functionIndex, signatureIndex, stackOffset, numberOfStackArgs, tableIndex);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& functionIndex, unsigned& signatureIndex, unsigned& stackOffset, unsigned& numberOfStackArgs, unsigned& tableIndex)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Wide16 is disabled for the Windows CLoop for now; re-enable once this is fixed:
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<WasmOpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(functionIndex)
&& Fits<unsigned, __size>::check(signatureIndex)
&& Fits<unsigned, __size>::check(stackOffset)
&& Fits<unsigned, __size>::check(numberOfStackArgs)
&& Fits<unsigned, __size>::check(tableIndex)
&& (__size == OpcodeSize::Wide16 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister functionIndex, unsigned signatureIndex, unsigned stackOffset, unsigned numberOfStackArgs, unsigned tableIndex)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, functionIndex, signatureIndex, stackOffset, numberOfStackArgs, tableIndex)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide32));
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(functionIndex));
gen->write(Fits<unsigned, __size>::convert(signatureIndex));
gen->write(Fits<unsigned, __size>::convert(stackOffset));
gen->write(Fits<unsigned, __size>::convert(numberOfStackArgs));
gen->write(Fits<unsigned, __size>::convert(tableIndex));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**call_indirect"[2 - __sizeShiftAmount]);
dumper->dumpOperand("functionIndex", m_functionIndex, true);
dumper->dumpOperand("signatureIndex", m_signatureIndex, false);
dumper->dumpOperand("stackOffset", m_stackOffset, false);
dumper->dumpOperand("numberOfStackArgs", m_numberOfStackArgs, false);
dumper->dumpOperand("tableIndex", m_tableIndex, false);
}
WasmCallIndirect(const uint8_t* stream)
: m_functionIndex(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_signatureIndex(Fits<unsigned, OpcodeSize::Narrow>::convert(stream[1]))
, m_stackOffset(Fits<unsigned, OpcodeSize::Narrow>::convert(stream[2]))
, m_numberOfStackArgs(Fits<unsigned, OpcodeSize::Narrow>::convert(stream[3]))
, m_tableIndex(Fits<unsigned, OpcodeSize::Narrow>::convert(stream[4]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmCallIndirect(const uint16_t* stream)
: m_functionIndex(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_signatureIndex(Fits<unsigned, OpcodeSize::Wide16>::convert(stream[1]))
, m_stackOffset(Fits<unsigned, OpcodeSize::Wide16>::convert(stream[2]))
, m_numberOfStackArgs(Fits<unsigned, OpcodeSize::Wide16>::convert(stream[3]))
, m_tableIndex(Fits<unsigned, OpcodeSize::Wide16>::convert(stream[4]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmCallIndirect(const uint32_t* stream)
: m_functionIndex(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_signatureIndex(Fits<unsigned, OpcodeSize::Wide32>::convert(stream[1]))
, m_stackOffset(Fits<unsigned, OpcodeSize::Wide32>::convert(stream[2]))
, m_numberOfStackArgs(Fits<unsigned, OpcodeSize::Wide32>::convert(stream[3]))
, m_tableIndex(Fits<unsigned, OpcodeSize::Wide32>::convert(stream[4]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static WasmCallIndirect decode(const uint8_t* stream)
{
// The constructors expect a pointer to the first operand, so skip the opcode byte and, when present, the wide prefix.
if (*stream == wasm_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == wasm_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setFunctionIndex(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setFunctionIndex<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setFunctionIndex<OpcodeSize::Wide16>(value, func);
else
setFunctionIndex<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setFunctionIndex(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setSignatureIndex(unsigned value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setSignatureIndex<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setSignatureIndex<OpcodeSize::Wide16>(value, func);
else
setSignatureIndex<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setSignatureIndex(unsigned value, Functor func)
{
if (!Fits<unsigned, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<unsigned, size>::convert(value);
}
template<typename Functor>
void setStackOffset(unsigned value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setStackOffset<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setStackOffset<OpcodeSize::Wide16>(value, func);
else
setStackOffset<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setStackOffset(unsigned value, Functor func)
{
if (!Fits<unsigned, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 2 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<unsigned, size>::convert(value);
}
template<typename Functor>
void setNumberOfStackArgs(unsigned value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setNumberOfStackArgs<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setNumberOfStackArgs<OpcodeSize::Wide16>(value, func);
else
setNumberOfStackArgs<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setNumberOfStackArgs(unsigned value, Functor func)
{
if (!Fits<unsigned, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 3 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<unsigned, size>::convert(value);
}
template<typename Functor>
void setTableIndex(unsigned value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setTableIndex<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setTableIndex<OpcodeSize::Wide16>(value, func);
else
setTableIndex<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setTableIndex(unsigned value, Functor func)
{
if (!Fits<unsigned, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 4 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<unsigned, size>::convert(value);
}
VirtualRegister m_functionIndex;
unsigned m_signatureIndex;
unsigned m_stackOffset;
unsigned m_numberOfStackArgs;
unsigned m_tableIndex;
};
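// WasmCallIndirectNoTls stands to WasmCallIndirect as WasmCallNoTls stands to
// WasmCall: the same five operands under a different opcode, with no
// thread-local-storage lookup implied.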
struct WasmCallIndirectNoTls : public Instruction {
static constexpr WasmOpcodeID opcodeID = wasm_call_indirect_no_tls;
static constexpr size_t length = 6;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister functionIndex, unsigned signatureIndex, unsigned stackOffset, unsigned numberOfStackArgs, unsigned tableIndex)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, functionIndex, signatureIndex, stackOffset, numberOfStackArgs, tableIndex);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister functionIndex, unsigned signatureIndex, unsigned stackOffset, unsigned numberOfStackArgs, unsigned tableIndex)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, functionIndex, signatureIndex, stackOffset, numberOfStackArgs, tableIndex);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister functionIndex, unsigned signatureIndex, unsigned stackOffset, unsigned numberOfStackArgs, unsigned tableIndex)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, functionIndex, signatureIndex, stackOffset, numberOfStackArgs, tableIndex))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, functionIndex, signatureIndex, stackOffset, numberOfStackArgs, tableIndex))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, functionIndex, signatureIndex, stackOffset, numberOfStackArgs, tableIndex);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& functionIndex, unsigned& signatureIndex, unsigned& stackOffset, unsigned& numberOfStackArgs, unsigned& tableIndex)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Wide16 is disabled for the Windows CLoop for now; re-enable once this is fixed:
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<WasmOpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(functionIndex)
&& Fits<unsigned, __size>::check(signatureIndex)
&& Fits<unsigned, __size>::check(stackOffset)
&& Fits<unsigned, __size>::check(numberOfStackArgs)
&& Fits<unsigned, __size>::check(tableIndex)
&& (__size == OpcodeSize::Wide16 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister functionIndex, unsigned signatureIndex, unsigned stackOffset, unsigned numberOfStackArgs, unsigned tableIndex)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, functionIndex, signatureIndex, stackOffset, numberOfStackArgs, tableIndex)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide32));
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(functionIndex));
gen->write(Fits<unsigned, __size>::convert(signatureIndex));
gen->write(Fits<unsigned, __size>::convert(stackOffset));
gen->write(Fits<unsigned, __size>::convert(numberOfStackArgs));
gen->write(Fits<unsigned, __size>::convert(tableIndex));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**call_indirect_no_tls"[2 - __sizeShiftAmount]);
dumper->dumpOperand("functionIndex", m_functionIndex, true);
dumper->dumpOperand("signatureIndex", m_signatureIndex, false);
dumper->dumpOperand("stackOffset", m_stackOffset, false);
dumper->dumpOperand("numberOfStackArgs", m_numberOfStackArgs, false);
dumper->dumpOperand("tableIndex", m_tableIndex, false);
}
WasmCallIndirectNoTls(const uint8_t* stream)
: m_functionIndex(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_signatureIndex(Fits<unsigned, OpcodeSize::Narrow>::convert(stream[1]))
, m_stackOffset(Fits<unsigned, OpcodeSize::Narrow>::convert(stream[2]))
, m_numberOfStackArgs(Fits<unsigned, OpcodeSize::Narrow>::convert(stream[3]))
, m_tableIndex(Fits<unsigned, OpcodeSize::Narrow>::convert(stream[4]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmCallIndirectNoTls(const uint16_t* stream)
: m_functionIndex(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_signatureIndex(Fits<unsigned, OpcodeSize::Wide16>::convert(stream[1]))
, m_stackOffset(Fits<unsigned, OpcodeSize::Wide16>::convert(stream[2]))
, m_numberOfStackArgs(Fits<unsigned, OpcodeSize::Wide16>::convert(stream[3]))
, m_tableIndex(Fits<unsigned, OpcodeSize::Wide16>::convert(stream[4]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmCallIndirectNoTls(const uint32_t* stream)
: m_functionIndex(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_signatureIndex(Fits<unsigned, OpcodeSize::Wide32>::convert(stream[1]))
, m_stackOffset(Fits<unsigned, OpcodeSize::Wide32>::convert(stream[2]))
, m_numberOfStackArgs(Fits<unsigned, OpcodeSize::Wide32>::convert(stream[3]))
, m_tableIndex(Fits<unsigned, OpcodeSize::Wide32>::convert(stream[4]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static WasmCallIndirectNoTls decode(const uint8_t* stream)
{
// The constructors expect a pointer to the first operand, so skip the opcode byte and, when present, the wide prefix.
if (*stream == wasm_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == wasm_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setFunctionIndex(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setFunctionIndex<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setFunctionIndex<OpcodeSize::Wide16>(value, func);
else
setFunctionIndex<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setFunctionIndex(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setSignatureIndex(unsigned value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setSignatureIndex<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setSignatureIndex<OpcodeSize::Wide16>(value, func);
else
setSignatureIndex<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setSignatureIndex(unsigned value, Functor func)
{
if (!Fits<unsigned, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<unsigned, size>::convert(value);
}
template<typename Functor>
void setStackOffset(unsigned value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setStackOffset<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setStackOffset<OpcodeSize::Wide16>(value, func);
else
setStackOffset<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setStackOffset(unsigned value, Functor func)
{
if (!Fits<unsigned, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 2 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<unsigned, size>::convert(value);
}
template<typename Functor>
void setNumberOfStackArgs(unsigned value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setNumberOfStackArgs<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setNumberOfStackArgs<OpcodeSize::Wide16>(value, func);
else
setNumberOfStackArgs<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setNumberOfStackArgs(unsigned value, Functor func)
{
if (!Fits<unsigned, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 3 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<unsigned, size>::convert(value);
}
template<typename Functor>
void setTableIndex(unsigned value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setTableIndex<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setTableIndex<OpcodeSize::Wide16>(value, func);
else
setTableIndex<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setTableIndex(unsigned value, Functor func)
{
if (!Fits<unsigned, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 4 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<unsigned, size>::convert(value);
}
VirtualRegister m_functionIndex;
unsigned m_signatureIndex;
unsigned m_stackOffset;
unsigned m_numberOfStackArgs;
unsigned m_tableIndex;
};
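// WasmCurrentMemory encodes memory.size (this opcode keeps the pre-standard name
// current_memory): it writes the current size of linear memory, in 64KiB pages, into
// dst. With a single operand it will typically fit the Narrow form.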
struct WasmCurrentMemory : public Instruction {
static constexpr WasmOpcodeID opcodeID = wasm_current_memory;
static constexpr size_t length = 2;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister dst)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, dst);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, dst);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister dst)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, dst))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, dst))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, dst);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& dst)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Wide16 is disabled for the Windows CLoop for now; re-enable once this is fixed:
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<WasmOpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(dst)
&& (__size == OpcodeSize::Wide16 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister dst)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, dst)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide32));
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(dst));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**current_memory"[2 - __sizeShiftAmount]);
dumper->dumpOperand("dst", m_dst, true);
}
WasmCurrentMemory(const uint8_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmCurrentMemory(const uint16_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmCurrentMemory(const uint32_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static WasmCurrentMemory decode(const uint8_t* stream)
{
// The constructors expect a pointer to the first operand, so skip the opcode byte and, when present, the wide prefix.
if (*stream == wasm_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == wasm_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide16>(value, func);
else
setDst<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
VirtualRegister m_dst;
};
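// wasm_grow_memory is the interpreter encoding of Wasm's memory.grow (named
// grow_memory pre-standardization): grow linear memory by `delta` pages, writing
// the previous page count, or -1 on failure, into `dst`.
// A minimal usage sketch, assuming a hypothetical generator `gen` and
// already-allocated registers `dstReg` and `deltaReg`:
//     WasmGrowMemory::emit(gen, dstReg, deltaReg); // picks the narrowest encoding that fits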
struct WasmGrowMemory : public Instruction {
static constexpr WasmOpcodeID opcodeID = wasm_grow_memory;
static constexpr size_t length = 3;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister delta)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, dst, delta);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister delta)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, dst, delta);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister delta)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, dst, delta))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, dst, delta))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, dst, delta);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& dst, VirtualRegister& delta)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<WasmOpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(dst)
&& Fits<VirtualRegister, __size>::check(delta)
&& (__size == OpcodeSize::Wide16 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister delta)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, dst, delta)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide32));
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(dst));
gen->write(Fits<VirtualRegister, __size>::convert(delta));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**grow_memory"[2 - __sizeShiftAmount]);
dumper->dumpOperand("dst", m_dst, true);
dumper->dumpOperand("delta", m_delta, false);
}
WasmGrowMemory(const uint8_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_delta(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[1]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmGrowMemory(const uint16_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_delta(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[1]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmGrowMemory(const uint32_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_delta(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[1]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static WasmGrowMemory decode(const uint8_t* stream)
{
        // The constructors receive a pointer to the first operand; the opcode byte and any wide prefix have already been skipped.
if (*stream == wasm_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == wasm_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide16>(value, func);
else
setDst<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setDelta(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setDelta<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setDelta<OpcodeSize::Wide16>(value, func);
else
setDelta<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setDelta(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
VirtualRegister m_dst;
VirtualRegister m_delta;
};
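// wasm_memory_fill encodes the bulk-memory memory.fill operation: fill `count`
// bytes of linear memory starting at `dstAddress` with the byte value held in
// `targetValue`, trapping on out-of-bounds ranges.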
struct WasmMemoryFill : public Instruction {
static constexpr WasmOpcodeID opcodeID = wasm_memory_fill;
static constexpr size_t length = 4;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister dstAddress, VirtualRegister targetValue, VirtualRegister count)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, dstAddress, targetValue, count);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister dstAddress, VirtualRegister targetValue, VirtualRegister count)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, dstAddress, targetValue, count);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister dstAddress, VirtualRegister targetValue, VirtualRegister count)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, dstAddress, targetValue, count))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, dstAddress, targetValue, count))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, dstAddress, targetValue, count);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& dstAddress, VirtualRegister& targetValue, VirtualRegister& count)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<WasmOpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(dstAddress)
&& Fits<VirtualRegister, __size>::check(targetValue)
&& Fits<VirtualRegister, __size>::check(count)
&& (__size == OpcodeSize::Wide16 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister dstAddress, VirtualRegister targetValue, VirtualRegister count)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, dstAddress, targetValue, count)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide32));
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(dstAddress));
gen->write(Fits<VirtualRegister, __size>::convert(targetValue));
gen->write(Fits<VirtualRegister, __size>::convert(count));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**memory_fill"[2 - __sizeShiftAmount]);
dumper->dumpOperand("dstAddress", m_dstAddress, true);
dumper->dumpOperand("targetValue", m_targetValue, false);
dumper->dumpOperand("count", m_count, false);
}
WasmMemoryFill(const uint8_t* stream)
: m_dstAddress(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_targetValue(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[1]))
, m_count(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmMemoryFill(const uint16_t* stream)
: m_dstAddress(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_targetValue(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[1]))
, m_count(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmMemoryFill(const uint32_t* stream)
: m_dstAddress(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_targetValue(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[1]))
, m_count(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static WasmMemoryFill decode(const uint8_t* stream)
{
        // The constructors receive a pointer to the first operand; the opcode byte and any wide prefix have already been skipped.
if (*stream == wasm_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == wasm_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setDstAddress(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setDstAddress<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setDstAddress<OpcodeSize::Wide16>(value, func);
else
setDstAddress<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setDstAddress(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setTargetValue(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setTargetValue<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setTargetValue<OpcodeSize::Wide16>(value, func);
else
setTargetValue<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setTargetValue(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setCount(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setCount<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setCount<OpcodeSize::Wide16>(value, func);
else
setCount<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setCount(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 2 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
VirtualRegister m_dstAddress;
VirtualRegister m_targetValue;
VirtualRegister m_count;
};
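// wasm_memory_copy encodes memory.copy: copy `count` bytes within linear memory
// from `srcAddress` to `dstAddress`; the Wasm spec gives overlapping ranges
// memmove-style semantics.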
struct WasmMemoryCopy : public Instruction {
static constexpr WasmOpcodeID opcodeID = wasm_memory_copy;
static constexpr size_t length = 4;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister dstAddress, VirtualRegister srcAddress, VirtualRegister count)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, dstAddress, srcAddress, count);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister dstAddress, VirtualRegister srcAddress, VirtualRegister count)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, dstAddress, srcAddress, count);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister dstAddress, VirtualRegister srcAddress, VirtualRegister count)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, dstAddress, srcAddress, count))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, dstAddress, srcAddress, count))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, dstAddress, srcAddress, count);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& dstAddress, VirtualRegister& srcAddress, VirtualRegister& count)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<WasmOpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(dstAddress)
&& Fits<VirtualRegister, __size>::check(srcAddress)
&& Fits<VirtualRegister, __size>::check(count)
&& (__size == OpcodeSize::Wide16 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister dstAddress, VirtualRegister srcAddress, VirtualRegister count)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, dstAddress, srcAddress, count)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide32));
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(dstAddress));
gen->write(Fits<VirtualRegister, __size>::convert(srcAddress));
gen->write(Fits<VirtualRegister, __size>::convert(count));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**memory_copy"[2 - __sizeShiftAmount]);
dumper->dumpOperand("dstAddress", m_dstAddress, true);
dumper->dumpOperand("srcAddress", m_srcAddress, false);
dumper->dumpOperand("count", m_count, false);
}
WasmMemoryCopy(const uint8_t* stream)
: m_dstAddress(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_srcAddress(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[1]))
, m_count(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmMemoryCopy(const uint16_t* stream)
: m_dstAddress(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_srcAddress(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[1]))
, m_count(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmMemoryCopy(const uint32_t* stream)
: m_dstAddress(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_srcAddress(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[1]))
, m_count(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static WasmMemoryCopy decode(const uint8_t* stream)
{
        // The constructors receive a pointer to the first operand; the opcode byte and any wide prefix have already been skipped.
if (*stream == wasm_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == wasm_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setDstAddress(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setDstAddress<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setDstAddress<OpcodeSize::Wide16>(value, func);
else
setDstAddress<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setDstAddress(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setSrcAddress(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setSrcAddress<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setSrcAddress<OpcodeSize::Wide16>(value, func);
else
setSrcAddress<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setSrcAddress(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setCount(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setCount<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setCount<OpcodeSize::Wide16>(value, func);
else
setCount<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setCount(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 2 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
VirtualRegister m_dstAddress;
VirtualRegister m_srcAddress;
VirtualRegister m_count;
};
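// wasm_memory_init encodes memory.init: copy `length` bytes from passive data
// segment `dataSegmentIndex`, starting at `srcAddress` within the segment, into
// linear memory at `dstAddress`. Note that dataSegmentIndex is an unsigned
// immediate, not a VirtualRegister.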
struct WasmMemoryInit : public Instruction {
static constexpr WasmOpcodeID opcodeID = wasm_memory_init;
static constexpr size_t length = 5;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister dstAddress, VirtualRegister srcAddress, VirtualRegister length, unsigned dataSegmentIndex)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, dstAddress, srcAddress, length, dataSegmentIndex);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister dstAddress, VirtualRegister srcAddress, VirtualRegister length, unsigned dataSegmentIndex)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, dstAddress, srcAddress, length, dataSegmentIndex);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister dstAddress, VirtualRegister srcAddress, VirtualRegister length, unsigned dataSegmentIndex)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, dstAddress, srcAddress, length, dataSegmentIndex))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, dstAddress, srcAddress, length, dataSegmentIndex))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, dstAddress, srcAddress, length, dataSegmentIndex);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& dstAddress, VirtualRegister& srcAddress, VirtualRegister& length, unsigned& dataSegmentIndex)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<WasmOpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(dstAddress)
&& Fits<VirtualRegister, __size>::check(srcAddress)
&& Fits<VirtualRegister, __size>::check(length)
&& Fits<unsigned, __size>::check(dataSegmentIndex)
&& (__size == OpcodeSize::Wide16 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister dstAddress, VirtualRegister srcAddress, VirtualRegister length, unsigned dataSegmentIndex)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, dstAddress, srcAddress, length, dataSegmentIndex)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide32));
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(dstAddress));
gen->write(Fits<VirtualRegister, __size>::convert(srcAddress));
gen->write(Fits<VirtualRegister, __size>::convert(length));
gen->write(Fits<unsigned, __size>::convert(dataSegmentIndex));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**memory_init"[2 - __sizeShiftAmount]);
dumper->dumpOperand("dstAddress", m_dstAddress, true);
dumper->dumpOperand("srcAddress", m_srcAddress, false);
dumper->dumpOperand("length", m_length, false);
dumper->dumpOperand("dataSegmentIndex", m_dataSegmentIndex, false);
}
WasmMemoryInit(const uint8_t* stream)
: m_dstAddress(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_srcAddress(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[1]))
, m_length(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[2]))
, m_dataSegmentIndex(Fits<unsigned, OpcodeSize::Narrow>::convert(stream[3]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmMemoryInit(const uint16_t* stream)
: m_dstAddress(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_srcAddress(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[1]))
, m_length(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[2]))
, m_dataSegmentIndex(Fits<unsigned, OpcodeSize::Wide16>::convert(stream[3]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmMemoryInit(const uint32_t* stream)
: m_dstAddress(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_srcAddress(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[1]))
, m_length(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[2]))
, m_dataSegmentIndex(Fits<unsigned, OpcodeSize::Wide32>::convert(stream[3]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static WasmMemoryInit decode(const uint8_t* stream)
{
        // The constructors receive a pointer to the first operand; the opcode byte and any wide prefix have already been skipped.
if (*stream == wasm_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == wasm_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setDstAddress(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setDstAddress<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setDstAddress<OpcodeSize::Wide16>(value, func);
else
setDstAddress<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setDstAddress(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setSrcAddress(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setSrcAddress<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setSrcAddress<OpcodeSize::Wide16>(value, func);
else
setSrcAddress<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setSrcAddress(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setLength(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setLength<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setLength<OpcodeSize::Wide16>(value, func);
else
setLength<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setLength(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 2 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setDataSegmentIndex(unsigned value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setDataSegmentIndex<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setDataSegmentIndex<OpcodeSize::Wide16>(value, func);
else
setDataSegmentIndex<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setDataSegmentIndex(unsigned value, Functor func)
{
if (!Fits<unsigned, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 3 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<unsigned, size>::convert(value);
}
VirtualRegister m_dstAddress;
VirtualRegister m_srcAddress;
VirtualRegister m_length;
unsigned m_dataSegmentIndex;
};
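// wasm_data_drop encodes data.drop: mark passive data segment `dataSegmentIndex`
// as dropped so its bytes can be reclaimed; subsequent memory.init from it traps
// (degenerate zero-length copies excepted).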
struct WasmDataDrop : public Instruction {
static constexpr WasmOpcodeID opcodeID = wasm_data_drop;
static constexpr size_t length = 2;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, unsigned dataSegmentIndex)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, dataSegmentIndex);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, unsigned dataSegmentIndex)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, dataSegmentIndex);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, unsigned dataSegmentIndex)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, dataSegmentIndex))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, dataSegmentIndex))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, dataSegmentIndex);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, unsigned& dataSegmentIndex)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<WasmOpcodeID, __size>::check(opcodeID)
&& Fits<unsigned, __size>::check(dataSegmentIndex)
&& (__size == OpcodeSize::Wide16 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, unsigned dataSegmentIndex)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, dataSegmentIndex)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide32));
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<unsigned, __size>::convert(dataSegmentIndex));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**data_drop"[2 - __sizeShiftAmount]);
dumper->dumpOperand("dataSegmentIndex", m_dataSegmentIndex, true);
}
WasmDataDrop(const uint8_t* stream)
: m_dataSegmentIndex(Fits<unsigned, OpcodeSize::Narrow>::convert(stream[0]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmDataDrop(const uint16_t* stream)
: m_dataSegmentIndex(Fits<unsigned, OpcodeSize::Wide16>::convert(stream[0]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmDataDrop(const uint32_t* stream)
: m_dataSegmentIndex(Fits<unsigned, OpcodeSize::Wide32>::convert(stream[0]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static WasmDataDrop decode(const uint8_t* stream)
{
        // The constructors receive a pointer to the first operand; the opcode byte and any wide prefix have already been skipped.
if (*stream == wasm_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == wasm_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setDataSegmentIndex(unsigned value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setDataSegmentIndex<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setDataSegmentIndex<OpcodeSize::Wide16>(value, func);
else
setDataSegmentIndex<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setDataSegmentIndex(unsigned value, Functor func)
{
if (!Fits<unsigned, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<unsigned, size>::convert(value);
}
unsigned m_dataSegmentIndex;
};
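// wasm_select encodes Wasm's select instruction:
//     dst = (condition != 0) ? nonZero : zero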
struct WasmSelect : public Instruction {
static constexpr WasmOpcodeID opcodeID = wasm_select;
static constexpr size_t length = 5;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister condition, VirtualRegister nonZero, VirtualRegister zero)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, dst, condition, nonZero, zero);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister condition, VirtualRegister nonZero, VirtualRegister zero)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, dst, condition, nonZero, zero);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister condition, VirtualRegister nonZero, VirtualRegister zero)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, dst, condition, nonZero, zero))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, dst, condition, nonZero, zero))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, dst, condition, nonZero, zero);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& dst, VirtualRegister& condition, VirtualRegister& nonZero, VirtualRegister& zero)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<WasmOpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(dst)
&& Fits<VirtualRegister, __size>::check(condition)
&& Fits<VirtualRegister, __size>::check(nonZero)
&& Fits<VirtualRegister, __size>::check(zero)
&& (__size == OpcodeSize::Wide16 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister condition, VirtualRegister nonZero, VirtualRegister zero)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, dst, condition, nonZero, zero)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide32));
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(dst));
gen->write(Fits<VirtualRegister, __size>::convert(condition));
gen->write(Fits<VirtualRegister, __size>::convert(nonZero));
gen->write(Fits<VirtualRegister, __size>::convert(zero));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**select"[2 - __sizeShiftAmount]);
dumper->dumpOperand("dst", m_dst, true);
dumper->dumpOperand("condition", m_condition, false);
dumper->dumpOperand("nonZero", m_nonZero, false);
dumper->dumpOperand("zero", m_zero, false);
}
WasmSelect(const uint8_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_condition(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[1]))
, m_nonZero(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[2]))
, m_zero(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[3]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmSelect(const uint16_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_condition(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[1]))
, m_nonZero(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[2]))
, m_zero(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[3]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmSelect(const uint32_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_condition(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[1]))
, m_nonZero(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[2]))
, m_zero(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[3]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static WasmSelect decode(const uint8_t* stream)
{
        // The constructors receive a pointer to the first operand; the opcode byte and any wide prefix have already been skipped.
if (*stream == wasm_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == wasm_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide16>(value, func);
else
setDst<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setCondition(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setCondition<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setCondition<OpcodeSize::Wide16>(value, func);
else
setCondition<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setCondition(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setNonZero(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setNonZero<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setNonZero<OpcodeSize::Wide16>(value, func);
else
setNonZero<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setNonZero(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 2 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setZero(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setZero<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setZero<OpcodeSize::Wide16>(value, func);
else
setZero<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setZero(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 3 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
VirtualRegister m_dst;
VirtualRegister m_condition;
VirtualRegister m_nonZero;
VirtualRegister m_zero;
};
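// wasm_load8_u encodes a zero-extending 8-bit load: read one byte from linear
// memory at `pointer` + `offset` (a static immediate) into `dst`. Whether the
// same opcode backs both i32.load8_u and i64.load8_u is an interpreter detail
// not visible from this header.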
struct WasmLoad8U : public Instruction {
static constexpr WasmOpcodeID opcodeID = wasm_load8_u;
static constexpr size_t length = 4;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister pointer, unsigned offset)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, dst, pointer, offset);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister pointer, unsigned offset)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, dst, pointer, offset);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister pointer, unsigned offset)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, dst, pointer, offset))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, dst, pointer, offset))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, dst, pointer, offset);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& dst, VirtualRegister& pointer, unsigned& offset)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<WasmOpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(dst)
&& Fits<VirtualRegister, __size>::check(pointer)
&& Fits<unsigned, __size>::check(offset)
&& (__size == OpcodeSize::Wide16 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister pointer, unsigned offset)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, dst, pointer, offset)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide32));
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(dst));
gen->write(Fits<VirtualRegister, __size>::convert(pointer));
gen->write(Fits<unsigned, __size>::convert(offset));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**load8_u"[2 - __sizeShiftAmount]);
dumper->dumpOperand("dst", m_dst, true);
dumper->dumpOperand("pointer", m_pointer, false);
dumper->dumpOperand("offset", m_offset, false);
}
WasmLoad8U(const uint8_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_pointer(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[1]))
, m_offset(Fits<unsigned, OpcodeSize::Narrow>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmLoad8U(const uint16_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_pointer(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[1]))
, m_offset(Fits<unsigned, OpcodeSize::Wide16>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmLoad8U(const uint32_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_pointer(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[1]))
, m_offset(Fits<unsigned, OpcodeSize::Wide32>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static WasmLoad8U decode(const uint8_t* stream)
{
        // The constructors receive a pointer to the first operand; the opcode byte and any wide prefix have already been skipped.
if (*stream == wasm_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == wasm_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide16>(value, func);
else
setDst<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setPointer(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setPointer<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setPointer<OpcodeSize::Wide16>(value, func);
else
setPointer<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setPointer(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setOffset(unsigned value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setOffset<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setOffset<OpcodeSize::Wide16>(value, func);
else
setOffset<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setOffset(unsigned value, Functor func)
{
if (!Fits<unsigned, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 2 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<unsigned, size>::convert(value);
}
VirtualRegister m_dst;
VirtualRegister m_pointer;
unsigned m_offset;
};
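// wasm_load16_u is the 16-bit counterpart of wasm_load8_u: zero-extend the
// halfword at `pointer` + `offset` into `dst`.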
struct WasmLoad16U : public Instruction {
static constexpr WasmOpcodeID opcodeID = wasm_load16_u;
static constexpr size_t length = 4;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister pointer, unsigned offset)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, dst, pointer, offset);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister pointer, unsigned offset)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, dst, pointer, offset);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister pointer, unsigned offset)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, dst, pointer, offset))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, dst, pointer, offset))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, dst, pointer, offset);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& dst, VirtualRegister& pointer, unsigned& offset)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<WasmOpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(dst)
&& Fits<VirtualRegister, __size>::check(pointer)
&& Fits<unsigned, __size>::check(offset)
&& (__size == OpcodeSize::Wide16 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister pointer, unsigned offset)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, dst, pointer, offset)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide32));
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(dst));
gen->write(Fits<VirtualRegister, __size>::convert(pointer));
gen->write(Fits<unsigned, __size>::convert(offset));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**load16_u"[2 - __sizeShiftAmount]);
dumper->dumpOperand("dst", m_dst, true);
dumper->dumpOperand("pointer", m_pointer, false);
dumper->dumpOperand("offset", m_offset, false);
}
WasmLoad16U(const uint8_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_pointer(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[1]))
, m_offset(Fits<unsigned, OpcodeSize::Narrow>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmLoad16U(const uint16_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_pointer(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[1]))
, m_offset(Fits<unsigned, OpcodeSize::Wide16>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmLoad16U(const uint32_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_pointer(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[1]))
, m_offset(Fits<unsigned, OpcodeSize::Wide32>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static WasmLoad16U decode(const uint8_t* stream)
{
        // The constructors receive a pointer to the first operand; the opcode byte and any wide prefix have already been skipped.
if (*stream == wasm_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == wasm_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide16>(value, func);
else
setDst<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setPointer(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setPointer<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setPointer<OpcodeSize::Wide16>(value, func);
else
setPointer<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setPointer(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setOffset(unsigned value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setOffset<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setOffset<OpcodeSize::Wide16>(value, func);
else
setOffset<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setOffset(unsigned value, Functor func)
{
if (!Fits<unsigned, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 2 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<unsigned, size>::convert(value);
}
VirtualRegister m_dst;
VirtualRegister m_pointer;
unsigned m_offset;
};
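// wasm_load32_u zero-extends the 32-bit word at `pointer` + `offset` into `dst`;
// as with the narrower loads, which Wasm load opcodes the generator lowers to it
// is not visible from this header.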
struct WasmLoad32U : public Instruction {
static constexpr WasmOpcodeID opcodeID = wasm_load32_u;
static constexpr size_t length = 4;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister pointer, unsigned offset)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, dst, pointer, offset);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister pointer, unsigned offset)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, dst, pointer, offset);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister pointer, unsigned offset)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, dst, pointer, offset))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, dst, pointer, offset))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, dst, pointer, offset);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& dst, VirtualRegister& pointer, unsigned& offset)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<WasmOpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(dst)
&& Fits<VirtualRegister, __size>::check(pointer)
&& Fits<unsigned, __size>::check(offset)
&& (__size == OpcodeSize::Wide16 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister pointer, unsigned offset)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, dst, pointer, offset)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide32));
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(dst));
gen->write(Fits<VirtualRegister, __size>::convert(pointer));
gen->write(Fits<unsigned, __size>::convert(offset));
return true;
}
return false;
}
public:
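    // __sizeShiftAmount is 0/1/2 for Narrow/Wide16/Wide32, so the pointer
    // arithmetic below prints "load32_u", "*load32_u", or "**load32_u".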
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**load32_u"[2 - __sizeShiftAmount]);
dumper->dumpOperand("dst", m_dst, true);
dumper->dumpOperand("pointer", m_pointer, false);
dumper->dumpOperand("offset", m_offset, false);
}
WasmLoad32U(const uint8_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_pointer(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[1]))
, m_offset(Fits<unsigned, OpcodeSize::Narrow>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmLoad32U(const uint16_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_pointer(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[1]))
, m_offset(Fits<unsigned, OpcodeSize::Wide16>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmLoad32U(const uint32_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_pointer(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[1]))
, m_offset(Fits<unsigned, OpcodeSize::Wide32>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static WasmLoad32U decode(const uint8_t* stream)
{
        // decode() takes a pointer at the (possibly prefixed) opcode byte and skips past it, so the constructor receives a pointer to the first operand.
if (*stream == wasm_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == wasm_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide16>(value, func);
else
setDst<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setPointer(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setPointer<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setPointer<OpcodeSize::Wide16>(value, func);
else
setPointer<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setPointer(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setOffset(unsigned value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setOffset<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setOffset<OpcodeSize::Wide16>(value, func);
else
setOffset<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setOffset(unsigned value, Functor func)
{
if (!Fits<unsigned, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 2 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<unsigned, size>::convert(value);
}
VirtualRegister m_dst;
VirtualRegister m_pointer;
unsigned m_offset;
};
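// Illustrative usage of the pattern above (the names `generator`, `dst`,
// `pointer`, `offset`, and `streamPointer` are placeholders supplied by the
// caller):
//
//     WasmLoad32U::emit(&generator, dst, pointer, offset); // picks the smallest fitting encoding
//     WasmLoad32U insn = WasmLoad32U::decode(streamPointer); // streamPointer -> opcode/prefix byte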
struct WasmLoad64U : public Instruction {
static constexpr WasmOpcodeID opcodeID = wasm_load64_u;
static constexpr size_t length = 4;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister pointer, unsigned offset)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, dst, pointer, offset);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister pointer, unsigned offset)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, dst, pointer, offset);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister pointer, unsigned offset)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, dst, pointer, offset))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, dst, pointer, offset))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, dst, pointer, offset);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& dst, VirtualRegister& pointer, unsigned& offset)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<WasmOpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(dst)
&& Fits<VirtualRegister, __size>::check(pointer)
&& Fits<unsigned, __size>::check(offset)
&& (__size == OpcodeSize::Wide16 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister pointer, unsigned offset)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, dst, pointer, offset)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide32));
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(dst));
gen->write(Fits<VirtualRegister, __size>::convert(pointer));
gen->write(Fits<unsigned, __size>::convert(offset));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**load64_u"[2 - __sizeShiftAmount]);
dumper->dumpOperand("dst", m_dst, true);
dumper->dumpOperand("pointer", m_pointer, false);
dumper->dumpOperand("offset", m_offset, false);
}
WasmLoad64U(const uint8_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_pointer(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[1]))
, m_offset(Fits<unsigned, OpcodeSize::Narrow>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmLoad64U(const uint16_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_pointer(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[1]))
, m_offset(Fits<unsigned, OpcodeSize::Wide16>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmLoad64U(const uint32_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_pointer(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[1]))
, m_offset(Fits<unsigned, OpcodeSize::Wide32>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static WasmLoad64U decode(const uint8_t* stream)
{
        // decode() takes a pointer at the (possibly prefixed) opcode byte and skips past it, so the constructor receives a pointer to the first operand.
if (*stream == wasm_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == wasm_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide16>(value, func);
else
setDst<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setPointer(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setPointer<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setPointer<OpcodeSize::Wide16>(value, func);
else
setPointer<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setPointer(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setOffset(unsigned value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setOffset<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setOffset<OpcodeSize::Wide16>(value, func);
else
setOffset<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setOffset(unsigned value, Functor func)
{
if (!Fits<unsigned, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 2 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<unsigned, size>::convert(value);
}
VirtualRegister m_dst;
VirtualRegister m_pointer;
unsigned m_offset;
};
struct WasmI32Load8S : public Instruction {
static constexpr WasmOpcodeID opcodeID = wasm_i32_load8_s;
static constexpr size_t length = 4;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister pointer, unsigned offset)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, dst, pointer, offset);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister pointer, unsigned offset)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, dst, pointer, offset);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister pointer, unsigned offset)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, dst, pointer, offset))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, dst, pointer, offset))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, dst, pointer, offset);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& dst, VirtualRegister& pointer, unsigned& offset)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<WasmOpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(dst)
&& Fits<VirtualRegister, __size>::check(pointer)
&& Fits<unsigned, __size>::check(offset)
&& (__size == OpcodeSize::Wide16 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister pointer, unsigned offset)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, dst, pointer, offset)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide32));
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(dst));
gen->write(Fits<VirtualRegister, __size>::convert(pointer));
gen->write(Fits<unsigned, __size>::convert(offset));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**i32_load8_s"[2 - __sizeShiftAmount]);
dumper->dumpOperand("dst", m_dst, true);
dumper->dumpOperand("pointer", m_pointer, false);
dumper->dumpOperand("offset", m_offset, false);
}
WasmI32Load8S(const uint8_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_pointer(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[1]))
, m_offset(Fits<unsigned, OpcodeSize::Narrow>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmI32Load8S(const uint16_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_pointer(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[1]))
, m_offset(Fits<unsigned, OpcodeSize::Wide16>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmI32Load8S(const uint32_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_pointer(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[1]))
, m_offset(Fits<unsigned, OpcodeSize::Wide32>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static WasmI32Load8S decode(const uint8_t* stream)
{
        // decode() takes a pointer at the (possibly prefixed) opcode byte and skips past it, so the constructor receives a pointer to the first operand.
if (*stream == wasm_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == wasm_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide16>(value, func);
else
setDst<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setPointer(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setPointer<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setPointer<OpcodeSize::Wide16>(value, func);
else
setPointer<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setPointer(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setOffset(unsigned value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setOffset<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setOffset<OpcodeSize::Wide16>(value, func);
else
setOffset<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setOffset(unsigned value, Functor func)
{
if (!Fits<unsigned, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 2 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<unsigned, size>::convert(value);
}
VirtualRegister m_dst;
VirtualRegister m_pointer;
unsigned m_offset;
};
struct WasmI64Load8S : public Instruction {
static constexpr WasmOpcodeID opcodeID = wasm_i64_load8_s;
static constexpr size_t length = 4;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister pointer, unsigned offset)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, dst, pointer, offset);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister pointer, unsigned offset)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, dst, pointer, offset);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister pointer, unsigned offset)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, dst, pointer, offset))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, dst, pointer, offset))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, dst, pointer, offset);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& dst, VirtualRegister& pointer, unsigned& offset)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<WasmOpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(dst)
&& Fits<VirtualRegister, __size>::check(pointer)
&& Fits<unsigned, __size>::check(offset)
&& (__size == OpcodeSize::Wide16 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister pointer, unsigned offset)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, dst, pointer, offset)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide32));
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(dst));
gen->write(Fits<VirtualRegister, __size>::convert(pointer));
gen->write(Fits<unsigned, __size>::convert(offset));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**i64_load8_s"[2 - __sizeShiftAmount]);
dumper->dumpOperand("dst", m_dst, true);
dumper->dumpOperand("pointer", m_pointer, false);
dumper->dumpOperand("offset", m_offset, false);
}
WasmI64Load8S(const uint8_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_pointer(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[1]))
, m_offset(Fits<unsigned, OpcodeSize::Narrow>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmI64Load8S(const uint16_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_pointer(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[1]))
, m_offset(Fits<unsigned, OpcodeSize::Wide16>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmI64Load8S(const uint32_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_pointer(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[1]))
, m_offset(Fits<unsigned, OpcodeSize::Wide32>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static WasmI64Load8S decode(const uint8_t* stream)
{
        // decode() takes a pointer at the (possibly prefixed) opcode byte and skips past it, so the constructor receives a pointer to the first operand.
if (*stream == wasm_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == wasm_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide16>(value, func);
else
setDst<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setPointer(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setPointer<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setPointer<OpcodeSize::Wide16>(value, func);
else
setPointer<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setPointer(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setOffset(unsigned value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setOffset<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setOffset<OpcodeSize::Wide16>(value, func);
else
setOffset<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setOffset(unsigned value, Functor func)
{
if (!Fits<unsigned, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 2 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<unsigned, size>::convert(value);
}
VirtualRegister m_dst;
VirtualRegister m_pointer;
unsigned m_offset;
};
struct WasmI32Load16S : public Instruction {
static constexpr WasmOpcodeID opcodeID = wasm_i32_load16_s;
static constexpr size_t length = 4;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister pointer, unsigned offset)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, dst, pointer, offset);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister pointer, unsigned offset)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, dst, pointer, offset);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister pointer, unsigned offset)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, dst, pointer, offset))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, dst, pointer, offset))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, dst, pointer, offset);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& dst, VirtualRegister& pointer, unsigned& offset)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<WasmOpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(dst)
&& Fits<VirtualRegister, __size>::check(pointer)
&& Fits<unsigned, __size>::check(offset)
&& (__size == OpcodeSize::Wide16 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister pointer, unsigned offset)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, dst, pointer, offset)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide32));
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(dst));
gen->write(Fits<VirtualRegister, __size>::convert(pointer));
gen->write(Fits<unsigned, __size>::convert(offset));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**i32_load16_s"[2 - __sizeShiftAmount]);
dumper->dumpOperand("dst", m_dst, true);
dumper->dumpOperand("pointer", m_pointer, false);
dumper->dumpOperand("offset", m_offset, false);
}
WasmI32Load16S(const uint8_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_pointer(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[1]))
, m_offset(Fits<unsigned, OpcodeSize::Narrow>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmI32Load16S(const uint16_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_pointer(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[1]))
, m_offset(Fits<unsigned, OpcodeSize::Wide16>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmI32Load16S(const uint32_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_pointer(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[1]))
, m_offset(Fits<unsigned, OpcodeSize::Wide32>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static WasmI32Load16S decode(const uint8_t* stream)
{
        // decode() takes a pointer at the (possibly prefixed) opcode byte and skips past it, so the constructor receives a pointer to the first operand.
if (*stream == wasm_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == wasm_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide16>(value, func);
else
setDst<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setPointer(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setPointer<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setPointer<OpcodeSize::Wide16>(value, func);
else
setPointer<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setPointer(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setOffset(unsigned value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setOffset<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setOffset<OpcodeSize::Wide16>(value, func);
else
setOffset<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setOffset(unsigned value, Functor func)
{
if (!Fits<unsigned, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 2 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<unsigned, size>::convert(value);
}
VirtualRegister m_dst;
VirtualRegister m_pointer;
unsigned m_offset;
};
struct WasmI64Load16S : public Instruction {
static constexpr WasmOpcodeID opcodeID = wasm_i64_load16_s;
static constexpr size_t length = 4;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister pointer, unsigned offset)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, dst, pointer, offset);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister pointer, unsigned offset)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, dst, pointer, offset);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister pointer, unsigned offset)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, dst, pointer, offset))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, dst, pointer, offset))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, dst, pointer, offset);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& dst, VirtualRegister& pointer, unsigned& offset)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<WasmOpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(dst)
&& Fits<VirtualRegister, __size>::check(pointer)
&& Fits<unsigned, __size>::check(offset)
&& (__size == OpcodeSize::Wide16 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister pointer, unsigned offset)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, dst, pointer, offset)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide32));
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(dst));
gen->write(Fits<VirtualRegister, __size>::convert(pointer));
gen->write(Fits<unsigned, __size>::convert(offset));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**i64_load16_s"[2 - __sizeShiftAmount]);
dumper->dumpOperand("dst", m_dst, true);
dumper->dumpOperand("pointer", m_pointer, false);
dumper->dumpOperand("offset", m_offset, false);
}
WasmI64Load16S(const uint8_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_pointer(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[1]))
, m_offset(Fits<unsigned, OpcodeSize::Narrow>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmI64Load16S(const uint16_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_pointer(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[1]))
, m_offset(Fits<unsigned, OpcodeSize::Wide16>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmI64Load16S(const uint32_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_pointer(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[1]))
, m_offset(Fits<unsigned, OpcodeSize::Wide32>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static WasmI64Load16S decode(const uint8_t* stream)
{
        // decode() takes a pointer at the (possibly prefixed) opcode byte and skips past it, so the constructor receives a pointer to the first operand.
if (*stream == wasm_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == wasm_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide16>(value, func);
else
setDst<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setPointer(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setPointer<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setPointer<OpcodeSize::Wide16>(value, func);
else
setPointer<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setPointer(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setOffset(unsigned value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setOffset<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setOffset<OpcodeSize::Wide16>(value, func);
else
setOffset<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setOffset(unsigned value, Functor func)
{
if (!Fits<unsigned, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 2 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<unsigned, size>::convert(value);
}
VirtualRegister m_dst;
VirtualRegister m_pointer;
unsigned m_offset;
};
struct WasmI64Load32S : public Instruction {
static constexpr WasmOpcodeID opcodeID = wasm_i64_load32_s;
static constexpr size_t length = 4;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister pointer, unsigned offset)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, dst, pointer, offset);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister pointer, unsigned offset)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, dst, pointer, offset);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister pointer, unsigned offset)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, dst, pointer, offset))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, dst, pointer, offset))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, dst, pointer, offset);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& dst, VirtualRegister& pointer, unsigned& offset)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<WasmOpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(dst)
&& Fits<VirtualRegister, __size>::check(pointer)
&& Fits<unsigned, __size>::check(offset)
&& (__size == OpcodeSize::Wide16 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister pointer, unsigned offset)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, dst, pointer, offset)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide32));
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(dst));
gen->write(Fits<VirtualRegister, __size>::convert(pointer));
gen->write(Fits<unsigned, __size>::convert(offset));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**i64_load32_s"[2 - __sizeShiftAmount]);
dumper->dumpOperand("dst", m_dst, true);
dumper->dumpOperand("pointer", m_pointer, false);
dumper->dumpOperand("offset", m_offset, false);
}
WasmI64Load32S(const uint8_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_pointer(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[1]))
, m_offset(Fits<unsigned, OpcodeSize::Narrow>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmI64Load32S(const uint16_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_pointer(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[1]))
, m_offset(Fits<unsigned, OpcodeSize::Wide16>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmI64Load32S(const uint32_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_pointer(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[1]))
, m_offset(Fits<unsigned, OpcodeSize::Wide32>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static WasmI64Load32S decode(const uint8_t* stream)
{
        // decode() takes a pointer at the (possibly prefixed) opcode byte and skips past it, so the constructor receives a pointer to the first operand.
if (*stream == wasm_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == wasm_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide16>(value, func);
else
setDst<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setPointer(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setPointer<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setPointer<OpcodeSize::Wide16>(value, func);
else
setPointer<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setPointer(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setOffset(unsigned value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setOffset<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setOffset<OpcodeSize::Wide16>(value, func);
else
setOffset<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setOffset(unsigned value, Functor func)
{
if (!Fits<unsigned, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 2 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<unsigned, size>::convert(value);
}
VirtualRegister m_dst;
VirtualRegister m_pointer;
unsigned m_offset;
};
struct WasmStore8 : public Instruction {
static constexpr WasmOpcodeID opcodeID = wasm_store8;
static constexpr size_t length = 4;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister pointer, VirtualRegister value, unsigned offset)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, pointer, value, offset);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister pointer, VirtualRegister value, unsigned offset)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, pointer, value, offset);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister pointer, VirtualRegister value, unsigned offset)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, pointer, value, offset))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, pointer, value, offset))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, pointer, value, offset);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& pointer, VirtualRegister& value, unsigned& offset)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<WasmOpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(pointer)
&& Fits<VirtualRegister, __size>::check(value)
&& Fits<unsigned, __size>::check(offset)
&& (__size == OpcodeSize::Wide16 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister pointer, VirtualRegister value, unsigned offset)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, pointer, value, offset)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide32));
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(pointer));
gen->write(Fits<VirtualRegister, __size>::convert(value));
gen->write(Fits<unsigned, __size>::convert(offset));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**store8"[2 - __sizeShiftAmount]);
dumper->dumpOperand("pointer", m_pointer, true);
dumper->dumpOperand("value", m_value, false);
dumper->dumpOperand("offset", m_offset, false);
}
WasmStore8(const uint8_t* stream)
: m_pointer(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_value(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[1]))
, m_offset(Fits<unsigned, OpcodeSize::Narrow>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmStore8(const uint16_t* stream)
: m_pointer(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_value(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[1]))
, m_offset(Fits<unsigned, OpcodeSize::Wide16>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmStore8(const uint32_t* stream)
: m_pointer(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_value(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[1]))
, m_offset(Fits<unsigned, OpcodeSize::Wide32>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static WasmStore8 decode(const uint8_t* stream)
{
        // decode() takes a pointer at the (possibly prefixed) opcode byte and skips past it, so the constructor receives a pointer to the first operand.
if (*stream == wasm_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == wasm_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setPointer(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setPointer<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setPointer<OpcodeSize::Wide16>(value, func);
else
setPointer<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setPointer(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setValue(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setValue<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setValue<OpcodeSize::Wide16>(value, func);
else
setValue<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setValue(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setOffset(unsigned value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setOffset<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setOffset<OpcodeSize::Wide16>(value, func);
else
setOffset<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setOffset(unsigned value, Functor func)
{
if (!Fits<unsigned, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 2 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<unsigned, size>::convert(value);
}
VirtualRegister m_pointer;
VirtualRegister m_value;
unsigned m_offset;
};
struct WasmStore16 : public Instruction {
static constexpr WasmOpcodeID opcodeID = wasm_store16;
static constexpr size_t length = 4;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister pointer, VirtualRegister value, unsigned offset)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, pointer, value, offset);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister pointer, VirtualRegister value, unsigned offset)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, pointer, value, offset);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister pointer, VirtualRegister value, unsigned offset)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, pointer, value, offset))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, pointer, value, offset))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, pointer, value, offset);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& pointer, VirtualRegister& value, unsigned& offset)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<WasmOpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(pointer)
&& Fits<VirtualRegister, __size>::check(value)
&& Fits<unsigned, __size>::check(offset)
&& (__size == OpcodeSize::Wide16 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister pointer, VirtualRegister value, unsigned offset)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, pointer, value, offset)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide32));
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(pointer));
gen->write(Fits<VirtualRegister, __size>::convert(value));
gen->write(Fits<unsigned, __size>::convert(offset));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**store16"[2 - __sizeShiftAmount]);
dumper->dumpOperand("pointer", m_pointer, true);
dumper->dumpOperand("value", m_value, false);
dumper->dumpOperand("offset", m_offset, false);
}
WasmStore16(const uint8_t* stream)
: m_pointer(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_value(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[1]))
, m_offset(Fits<unsigned, OpcodeSize::Narrow>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmStore16(const uint16_t* stream)
: m_pointer(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_value(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[1]))
, m_offset(Fits<unsigned, OpcodeSize::Wide16>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmStore16(const uint32_t* stream)
: m_pointer(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_value(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[1]))
, m_offset(Fits<unsigned, OpcodeSize::Wide32>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static WasmStore16 decode(const uint8_t* stream)
{
// The constructor receives a pointer to the first operand; the opcode and any wide prefix are skipped here.
if (*stream == wasm_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == wasm_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setPointer(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setPointer<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setPointer<OpcodeSize::Wide16>(value, func);
else
setPointer<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setPointer(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setValue(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setValue<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setValue<OpcodeSize::Wide16>(value, func);
else
setValue<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setValue(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setOffset(unsigned value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setOffset<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setOffset<OpcodeSize::Wide16>(value, func);
else
setOffset<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setOffset(unsigned value, Functor func)
{
if (!Fits<unsigned, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 2 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<unsigned, size>::convert(value);
}
VirtualRegister m_pointer;
VirtualRegister m_value;
unsigned m_offset;
};
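// wasm_store32: writes the low 32 bits of |value| to linear memory at |pointer| + |offset|.
// Encoding: the opcode byte followed by three operands (pointer, value, offset), emitted at
// the smallest of the narrow/wide16/wide32 sizes that fits every operand.
//
// Typical use, sketched under the assumption that |gen| is the bytecode generator these
// templates are instantiated with and |pc| points at the opcode (or wide prefix) byte:
//   WasmStore32::emit(gen, pointer, value, offset); // picks the smallest fitting encoding
//   auto op = WasmStore32::decode(pc);              // rehydrates the operands for inspection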
struct WasmStore32 : public Instruction {
static constexpr WasmOpcodeID opcodeID = wasm_store32;
static constexpr size_t length = 4;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister pointer, VirtualRegister value, unsigned offset)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, pointer, value, offset);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister pointer, VirtualRegister value, unsigned offset)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, pointer, value, offset);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister pointer, VirtualRegister value, unsigned offset)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, pointer, value, offset))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, pointer, value, offset))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, pointer, value, offset);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& pointer, VirtualRegister& value, unsigned& offset)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<WasmOpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(pointer)
&& Fits<VirtualRegister, __size>::check(value)
&& Fits<unsigned, __size>::check(offset)
&& (__size == OpcodeSize::Wide16 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister pointer, VirtualRegister value, unsigned offset)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, pointer, value, offset)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide32));
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(pointer));
gen->write(Fits<VirtualRegister, __size>::convert(value));
gen->write(Fits<unsigned, __size>::convert(offset));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**store32"[2 - __sizeShiftAmount]);
dumper->dumpOperand("pointer", m_pointer, true);
dumper->dumpOperand("value", m_value, false);
dumper->dumpOperand("offset", m_offset, false);
}
WasmStore32(const uint8_t* stream)
: m_pointer(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_value(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[1]))
, m_offset(Fits<unsigned, OpcodeSize::Narrow>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmStore32(const uint16_t* stream)
: m_pointer(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_value(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[1]))
, m_offset(Fits<unsigned, OpcodeSize::Wide16>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmStore32(const uint32_t* stream)
: m_pointer(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_value(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[1]))
, m_offset(Fits<unsigned, OpcodeSize::Wide32>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static WasmStore32 decode(const uint8_t* stream)
{
// The constructor receives a pointer to the first operand; the opcode and any wide prefix are skipped here.
if (*stream == wasm_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == wasm_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setPointer(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setPointer<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setPointer<OpcodeSize::Wide16>(value, func);
else
setPointer<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setPointer(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setValue(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setValue<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setValue<OpcodeSize::Wide16>(value, func);
else
setValue<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setValue(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setOffset(unsigned value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setOffset<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setOffset<OpcodeSize::Wide16>(value, func);
else
setOffset<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setOffset(unsigned value, Functor func)
{
if (!Fits<unsigned, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 2 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<unsigned, size>::convert(value);
}
VirtualRegister m_pointer;
VirtualRegister m_value;
unsigned m_offset;
};
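// wasm_store64: same operand layout as wasm_store16/wasm_store32 above, but stores the
// full 64-bit |value| at |pointer| + |offset|.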
struct WasmStore64 : public Instruction {
static constexpr WasmOpcodeID opcodeID = wasm_store64;
static constexpr size_t length = 4;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister pointer, VirtualRegister value, unsigned offset)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, pointer, value, offset);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister pointer, VirtualRegister value, unsigned offset)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, pointer, value, offset);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister pointer, VirtualRegister value, unsigned offset)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, pointer, value, offset))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, pointer, value, offset))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, pointer, value, offset);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& pointer, VirtualRegister& value, unsigned& offset)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<WasmOpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(pointer)
&& Fits<VirtualRegister, __size>::check(value)
&& Fits<unsigned, __size>::check(offset)
&& (__size == OpcodeSize::Wide16 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister pointer, VirtualRegister value, unsigned offset)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, pointer, value, offset)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide32));
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(pointer));
gen->write(Fits<VirtualRegister, __size>::convert(value));
gen->write(Fits<unsigned, __size>::convert(offset));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**store64"[2 - __sizeShiftAmount]);
dumper->dumpOperand("pointer", m_pointer, true);
dumper->dumpOperand("value", m_value, false);
dumper->dumpOperand("offset", m_offset, false);
}
WasmStore64(const uint8_t* stream)
: m_pointer(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_value(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[1]))
, m_offset(Fits<unsigned, OpcodeSize::Narrow>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmStore64(const uint16_t* stream)
: m_pointer(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_value(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[1]))
, m_offset(Fits<unsigned, OpcodeSize::Wide16>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmStore64(const uint32_t* stream)
: m_pointer(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_value(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[1]))
, m_offset(Fits<unsigned, OpcodeSize::Wide32>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static WasmStore64 decode(const uint8_t* stream)
{
// The constructor receives a pointer to the first operand; the opcode and any wide prefix are skipped here.
if (*stream == wasm_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == wasm_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setPointer(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setPointer<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setPointer<OpcodeSize::Wide16>(value, func);
else
setPointer<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setPointer(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setValue(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setValue<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setValue<OpcodeSize::Wide16>(value, func);
else
setValue<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setValue(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setOffset(unsigned value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setOffset<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setOffset<OpcodeSize::Wide16>(value, func);
else
setOffset<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setOffset(unsigned value, Functor func)
{
if (!Fits<unsigned, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 2 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<unsigned, size>::convert(value);
}
VirtualRegister m_pointer;
VirtualRegister m_value;
unsigned m_offset;
};
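// wasm_i64_atomic_rmw_add (Wasm's i64.atomic.rmw.add): atomically adds |value| to the
// 64-bit cell at |pointer| + |offset| and returns the cell's previous contents in |dst|.
// Length 5: the opcode plus four operands (dst, pointer, offset, value).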
struct WasmI64AtomicRmwAdd : public Instruction {
static constexpr WasmOpcodeID opcodeID = wasm_i64_atomic_rmw_add;
static constexpr size_t length = 5;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister pointer, unsigned offset, VirtualRegister value)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, dst, pointer, offset, value);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister pointer, unsigned offset, VirtualRegister value)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, dst, pointer, offset, value);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister pointer, unsigned offset, VirtualRegister value)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, dst, pointer, offset, value))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, dst, pointer, offset, value))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, dst, pointer, offset, value);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& dst, VirtualRegister& pointer, unsigned& offset, VirtualRegister& value)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<WasmOpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(dst)
&& Fits<VirtualRegister, __size>::check(pointer)
&& Fits<unsigned, __size>::check(offset)
&& Fits<VirtualRegister, __size>::check(value)
&& (__size == OpcodeSize::Wide16 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister pointer, unsigned offset, VirtualRegister value)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, dst, pointer, offset, value)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide32));
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(dst));
gen->write(Fits<VirtualRegister, __size>::convert(pointer));
gen->write(Fits<unsigned, __size>::convert(offset));
gen->write(Fits<VirtualRegister, __size>::convert(value));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**i64_atomic_rmw_add"[2 - __sizeShiftAmount]);
dumper->dumpOperand("dst", m_dst, true);
dumper->dumpOperand("pointer", m_pointer, false);
dumper->dumpOperand("offset", m_offset, false);
dumper->dumpOperand("value", m_value, false);
}
WasmI64AtomicRmwAdd(const uint8_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_pointer(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[1]))
, m_offset(Fits<unsigned, OpcodeSize::Narrow>::convert(stream[2]))
, m_value(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[3]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmI64AtomicRmwAdd(const uint16_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_pointer(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[1]))
, m_offset(Fits<unsigned, OpcodeSize::Wide16>::convert(stream[2]))
, m_value(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[3]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmI64AtomicRmwAdd(const uint32_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_pointer(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[1]))
, m_offset(Fits<unsigned, OpcodeSize::Wide32>::convert(stream[2]))
, m_value(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[3]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static WasmI64AtomicRmwAdd decode(const uint8_t* stream)
{
// The constructor receives a pointer to the first operand; the opcode and any wide prefix are skipped here.
if (*stream == wasm_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == wasm_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide16>(value, func);
else
setDst<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setPointer(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setPointer<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setPointer<OpcodeSize::Wide16>(value, func);
else
setPointer<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setPointer(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setOffset(unsigned value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setOffset<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setOffset<OpcodeSize::Wide16>(value, func);
else
setOffset<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setOffset(unsigned value, Functor func)
{
if (!Fits<unsigned, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 2 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<unsigned, size>::convert(value);
}
template<typename Functor>
void setValue(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setValue<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setValue<OpcodeSize::Wide16>(value, func);
else
setValue<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setValue(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 3 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
VirtualRegister m_dst;
VirtualRegister m_pointer;
unsigned m_offset;
VirtualRegister m_value;
};
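// wasm_i64_atomic_rmw8_add_u (i64.atomic.rmw8.add_u): the 8-bit variant of the atomic add
// above; it operates on a single byte at |pointer| + |offset| and zero-extends the old
// value into |dst|.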
struct WasmI64AtomicRmw8AddU : public Instruction {
static constexpr WasmOpcodeID opcodeID = wasm_i64_atomic_rmw8_add_u;
static constexpr size_t length = 5;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister pointer, unsigned offset, VirtualRegister value)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, dst, pointer, offset, value);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister pointer, unsigned offset, VirtualRegister value)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, dst, pointer, offset, value);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister pointer, unsigned offset, VirtualRegister value)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, dst, pointer, offset, value))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, dst, pointer, offset, value))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, dst, pointer, offset, value);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& dst, VirtualRegister& pointer, unsigned& offset, VirtualRegister& value)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<WasmOpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(dst)
&& Fits<VirtualRegister, __size>::check(pointer)
&& Fits<unsigned, __size>::check(offset)
&& Fits<VirtualRegister, __size>::check(value)
&& (__size == OpcodeSize::Wide16 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister pointer, unsigned offset, VirtualRegister value)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, dst, pointer, offset, value)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide32));
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(dst));
gen->write(Fits<VirtualRegister, __size>::convert(pointer));
gen->write(Fits<unsigned, __size>::convert(offset));
gen->write(Fits<VirtualRegister, __size>::convert(value));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**i64_atomic_rmw8_add_u"[2 - __sizeShiftAmount]);
dumper->dumpOperand("dst", m_dst, true);
dumper->dumpOperand("pointer", m_pointer, false);
dumper->dumpOperand("offset", m_offset, false);
dumper->dumpOperand("value", m_value, false);
}
WasmI64AtomicRmw8AddU(const uint8_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_pointer(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[1]))
, m_offset(Fits<unsigned, OpcodeSize::Narrow>::convert(stream[2]))
, m_value(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[3]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmI64AtomicRmw8AddU(const uint16_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_pointer(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[1]))
, m_offset(Fits<unsigned, OpcodeSize::Wide16>::convert(stream[2]))
, m_value(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[3]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmI64AtomicRmw8AddU(const uint32_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_pointer(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[1]))
, m_offset(Fits<unsigned, OpcodeSize::Wide32>::convert(stream[2]))
, m_value(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[3]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static WasmI64AtomicRmw8AddU decode(const uint8_t* stream)
{
// The constructor receives a pointer to the first operand; the opcode and any wide prefix are skipped here.
if (*stream == wasm_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == wasm_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide16>(value, func);
else
setDst<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setPointer(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setPointer<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setPointer<OpcodeSize::Wide16>(value, func);
else
setPointer<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setPointer(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setOffset(unsigned value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setOffset<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setOffset<OpcodeSize::Wide16>(value, func);
else
setOffset<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setOffset(unsigned value, Functor func)
{
if (!Fits<unsigned, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 2 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<unsigned, size>::convert(value);
}
template<typename Functor>
void setValue(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setValue<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setValue<OpcodeSize::Wide16>(value, func);
else
setValue<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setValue(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 3 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
VirtualRegister m_dst;
VirtualRegister m_pointer;
unsigned m_offset;
VirtualRegister m_value;
};
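// wasm_i64_atomic_rmw16_add_u (i64.atomic.rmw16.add_u): as above, on a 16-bit cell,
// zero-extending the old value into |dst|.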
struct WasmI64AtomicRmw16AddU : public Instruction {
static constexpr WasmOpcodeID opcodeID = wasm_i64_atomic_rmw16_add_u;
static constexpr size_t length = 5;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister pointer, unsigned offset, VirtualRegister value)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, dst, pointer, offset, value);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister pointer, unsigned offset, VirtualRegister value)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, dst, pointer, offset, value);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister pointer, unsigned offset, VirtualRegister value)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, dst, pointer, offset, value))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, dst, pointer, offset, value))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, dst, pointer, offset, value);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& dst, VirtualRegister& pointer, unsigned& offset, VirtualRegister& value)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<WasmOpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(dst)
&& Fits<VirtualRegister, __size>::check(pointer)
&& Fits<unsigned, __size>::check(offset)
&& Fits<VirtualRegister, __size>::check(value)
&& (__size == OpcodeSize::Wide16 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister pointer, unsigned offset, VirtualRegister value)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, dst, pointer, offset, value)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide32));
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(dst));
gen->write(Fits<VirtualRegister, __size>::convert(pointer));
gen->write(Fits<unsigned, __size>::convert(offset));
gen->write(Fits<VirtualRegister, __size>::convert(value));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**i64_atomic_rmw16_add_u"[2 - __sizeShiftAmount]);
dumper->dumpOperand("dst", m_dst, true);
dumper->dumpOperand("pointer", m_pointer, false);
dumper->dumpOperand("offset", m_offset, false);
dumper->dumpOperand("value", m_value, false);
}
WasmI64AtomicRmw16AddU(const uint8_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_pointer(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[1]))
, m_offset(Fits<unsigned, OpcodeSize::Narrow>::convert(stream[2]))
, m_value(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[3]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmI64AtomicRmw16AddU(const uint16_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_pointer(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[1]))
, m_offset(Fits<unsigned, OpcodeSize::Wide16>::convert(stream[2]))
, m_value(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[3]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmI64AtomicRmw16AddU(const uint32_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_pointer(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[1]))
, m_offset(Fits<unsigned, OpcodeSize::Wide32>::convert(stream[2]))
, m_value(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[3]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static WasmI64AtomicRmw16AddU decode(const uint8_t* stream)
{
// The constructor receives a pointer to the first operand; the opcode and any wide prefix are skipped here.
if (*stream == wasm_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == wasm_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide16>(value, func);
else
setDst<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setPointer(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setPointer<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setPointer<OpcodeSize::Wide16>(value, func);
else
setPointer<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setPointer(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setOffset(unsigned value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setOffset<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setOffset<OpcodeSize::Wide16>(value, func);
else
setOffset<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setOffset(unsigned value, Functor func)
{
if (!Fits<unsigned, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 2 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<unsigned, size>::convert(value);
}
template<typename Functor>
void setValue(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setValue<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setValue<OpcodeSize::Wide16>(value, func);
else
setValue<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setValue(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 3 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
VirtualRegister m_dst;
VirtualRegister m_pointer;
unsigned m_offset;
VirtualRegister m_value;
};
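// wasm_i64_atomic_rmw32_add_u (i64.atomic.rmw32.add_u): as above, on a 32-bit cell,
// zero-extending the old value into |dst|.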
struct WasmI64AtomicRmw32AddU : public Instruction {
static constexpr WasmOpcodeID opcodeID = wasm_i64_atomic_rmw32_add_u;
static constexpr size_t length = 5;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister pointer, unsigned offset, VirtualRegister value)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, dst, pointer, offset, value);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister pointer, unsigned offset, VirtualRegister value)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, dst, pointer, offset, value);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister pointer, unsigned offset, VirtualRegister value)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, dst, pointer, offset, value))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, dst, pointer, offset, value))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, dst, pointer, offset, value);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& dst, VirtualRegister& pointer, unsigned& offset, VirtualRegister& value)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<WasmOpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(dst)
&& Fits<VirtualRegister, __size>::check(pointer)
&& Fits<unsigned, __size>::check(offset)
&& Fits<VirtualRegister, __size>::check(value)
&& (__size == OpcodeSize::Wide16 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister pointer, unsigned offset, VirtualRegister value)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, dst, pointer, offset, value)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide32));
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(dst));
gen->write(Fits<VirtualRegister, __size>::convert(pointer));
gen->write(Fits<unsigned, __size>::convert(offset));
gen->write(Fits<VirtualRegister, __size>::convert(value));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**i64_atomic_rmw32_add_u"[2 - __sizeShiftAmount]);
dumper->dumpOperand("dst", m_dst, true);
dumper->dumpOperand("pointer", m_pointer, false);
dumper->dumpOperand("offset", m_offset, false);
dumper->dumpOperand("value", m_value, false);
}
WasmI64AtomicRmw32AddU(const uint8_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_pointer(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[1]))
, m_offset(Fits<unsigned, OpcodeSize::Narrow>::convert(stream[2]))
, m_value(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[3]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmI64AtomicRmw32AddU(const uint16_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_pointer(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[1]))
, m_offset(Fits<unsigned, OpcodeSize::Wide16>::convert(stream[2]))
, m_value(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[3]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmI64AtomicRmw32AddU(const uint32_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_pointer(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[1]))
, m_offset(Fits<unsigned, OpcodeSize::Wide32>::convert(stream[2]))
, m_value(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[3]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static WasmI64AtomicRmw32AddU decode(const uint8_t* stream)
{
// The constructor receives a pointer to the first operand; the opcode and any wide prefix are skipped here.
if (*stream == wasm_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == wasm_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide16>(value, func);
else
setDst<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setPointer(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setPointer<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setPointer<OpcodeSize::Wide16>(value, func);
else
setPointer<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setPointer(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setOffset(unsigned value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setOffset<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setOffset<OpcodeSize::Wide16>(value, func);
else
setOffset<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setOffset(unsigned value, Functor func)
{
if (!Fits<unsigned, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 2 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<unsigned, size>::convert(value);
}
template<typename Functor>
void setValue(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setValue<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setValue<OpcodeSize::Wide16>(value, func);
else
setValue<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setValue(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 3 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
VirtualRegister m_dst;
VirtualRegister m_pointer;
unsigned m_offset;
VirtualRegister m_value;
};
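// wasm_i64_atomic_rmw_sub (i64.atomic.rmw.sub): atomically subtracts |value| from the
// 64-bit cell at |pointer| + |offset|, returning the previous contents in |dst|.
// Operand layout is identical to the atomic-add opcodes above.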
struct WasmI64AtomicRmwSub : public Instruction {
static constexpr WasmOpcodeID opcodeID = wasm_i64_atomic_rmw_sub;
static constexpr size_t length = 5;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister pointer, unsigned offset, VirtualRegister value)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, dst, pointer, offset, value);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister pointer, unsigned offset, VirtualRegister value)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, dst, pointer, offset, value);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister pointer, unsigned offset, VirtualRegister value)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, dst, pointer, offset, value))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, dst, pointer, offset, value))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, dst, pointer, offset, value);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& dst, VirtualRegister& pointer, unsigned& offset, VirtualRegister& value)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<WasmOpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(dst)
&& Fits<VirtualRegister, __size>::check(pointer)
&& Fits<unsigned, __size>::check(offset)
&& Fits<VirtualRegister, __size>::check(value)
&& (__size == OpcodeSize::Wide16 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister pointer, unsigned offset, VirtualRegister value)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, dst, pointer, offset, value)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide32));
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(dst));
gen->write(Fits<VirtualRegister, __size>::convert(pointer));
gen->write(Fits<unsigned, __size>::convert(offset));
gen->write(Fits<VirtualRegister, __size>::convert(value));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**i64_atomic_rmw_sub"[2 - __sizeShiftAmount]);
dumper->dumpOperand("dst", m_dst, true);
dumper->dumpOperand("pointer", m_pointer, false);
dumper->dumpOperand("offset", m_offset, false);
dumper->dumpOperand("value", m_value, false);
}
WasmI64AtomicRmwSub(const uint8_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_pointer(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[1]))
, m_offset(Fits<unsigned, OpcodeSize::Narrow>::convert(stream[2]))
, m_value(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[3]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmI64AtomicRmwSub(const uint16_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_pointer(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[1]))
, m_offset(Fits<unsigned, OpcodeSize::Wide16>::convert(stream[2]))
, m_value(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[3]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmI64AtomicRmwSub(const uint32_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_pointer(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[1]))
, m_offset(Fits<unsigned, OpcodeSize::Wide32>::convert(stream[2]))
, m_value(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[3]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static WasmI64AtomicRmwSub decode(const uint8_t* stream)
{
// The constructor receives a pointer to the first operand; the opcode and any wide prefix are skipped here.
if (*stream == wasm_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == wasm_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide16>(value, func);
else
setDst<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setPointer(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setPointer<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setPointer<OpcodeSize::Wide16>(value, func);
else
setPointer<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setPointer(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setOffset(unsigned value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setOffset<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setOffset<OpcodeSize::Wide16>(value, func);
else
setOffset<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setOffset(unsigned value, Functor func)
{
if (!Fits<unsigned, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 2 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<unsigned, size>::convert(value);
}
template<typename Functor>
void setValue(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setValue<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setValue<OpcodeSize::Wide16>(value, func);
else
setValue<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setValue(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 3 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
VirtualRegister m_dst;
VirtualRegister m_pointer;
unsigned m_offset;
VirtualRegister m_value;
};
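// wasm_i64_atomic_rmw8_sub_u (i64.atomic.rmw8.sub_u): the 8-bit variant of the atomic
// subtract; zero-extends the cell's old value into |dst|.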
struct WasmI64AtomicRmw8SubU : public Instruction {
static constexpr WasmOpcodeID opcodeID = wasm_i64_atomic_rmw8_sub_u;
static constexpr size_t length = 5;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister pointer, unsigned offset, VirtualRegister value)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, dst, pointer, offset, value);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister pointer, unsigned offset, VirtualRegister value)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, dst, pointer, offset, value);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister pointer, unsigned offset, VirtualRegister value)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, dst, pointer, offset, value))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, dst, pointer, offset, value))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, dst, pointer, offset, value);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& dst, VirtualRegister& pointer, unsigned& offset, VirtualRegister& value)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<WasmOpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(dst)
&& Fits<VirtualRegister, __size>::check(pointer)
&& Fits<unsigned, __size>::check(offset)
&& Fits<VirtualRegister, __size>::check(value)
&& (__size == OpcodeSize::Wide16 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister pointer, unsigned offset, VirtualRegister value)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, dst, pointer, offset, value)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide32));
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(dst));
gen->write(Fits<VirtualRegister, __size>::convert(pointer));
gen->write(Fits<unsigned, __size>::convert(offset));
gen->write(Fits<VirtualRegister, __size>::convert(value));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
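        // "[2 - __sizeShiftAmount]" selects the opcode name with zero, one, or two
        // leading '*'s: Narrow prints "i64_atomic_rmw8_sub_u", Wide16 prints
        // "*i64_atomic_rmw8_sub_u", and Wide32 prints "**i64_atomic_rmw8_sub_u".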
dumper->printLocationAndOp(__location, &"**i64_atomic_rmw8_sub_u"[2 - __sizeShiftAmount]);
dumper->dumpOperand("dst", m_dst, true);
dumper->dumpOperand("pointer", m_pointer, false);
dumper->dumpOperand("offset", m_offset, false);
dumper->dumpOperand("value", m_value, false);
}
WasmI64AtomicRmw8SubU(const uint8_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_pointer(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[1]))
, m_offset(Fits<unsigned, OpcodeSize::Narrow>::convert(stream[2]))
, m_value(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[3]))
{
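        // Sanity check: the byte immediately preceding the operands must be this
        // instruction's opcode (the constructor receives a pointer just past it).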
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmI64AtomicRmw8SubU(const uint16_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_pointer(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[1]))
, m_offset(Fits<unsigned, OpcodeSize::Wide16>::convert(stream[2]))
, m_value(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[3]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmI64AtomicRmw8SubU(const uint32_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_pointer(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[1]))
, m_offset(Fits<unsigned, OpcodeSize::Wide32>::convert(stream[2]))
, m_value(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[3]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static WasmI64AtomicRmw8SubU decode(const uint8_t* stream)
{
        // Construct from a pointer to the first operand; the opcode byte and any wide prefix are skipped here.
if (*stream == wasm_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == wasm_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide16>(value, func);
else
setDst<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setPointer(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setPointer<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setPointer<OpcodeSize::Wide16>(value, func);
else
setPointer<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setPointer(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setOffset(unsigned value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setOffset<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setOffset<OpcodeSize::Wide16>(value, func);
else
setOffset<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setOffset(unsigned value, Functor func)
{
if (!Fits<unsigned, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 2 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<unsigned, size>::convert(value);
}
template<typename Functor>
void setValue(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setValue<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setValue<OpcodeSize::Wide16>(value, func);
else
setValue<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setValue(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 3 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
VirtualRegister m_dst;
VirtualRegister m_pointer;
unsigned m_offset;
VirtualRegister m_value;
};
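// A hedged emission sketch (assuming a concrete Wasm BytecodeGenerator 'gen' and
// already-allocated virtual registers; all variable names are illustrative): the
// unsized emit() delegates to emitWithSmallestSizeRequirement(), which tries the
// Narrow encoding first, then Wide16, then Wide32, asserting only on the widest
// form.
//
//     // Picks the smallest encoding that fits all four operands.
//     WasmI64AtomicRmw8SubU::emit(&gen, dst, pointer, /* offset */ 0, value);
//
//     // Force a 32-bit encoding instead; the remaining template arguments are
//     // deduced from 'gen'.
//     WasmI64AtomicRmw8SubU::emit<OpcodeSize::Wide32>(&gen, dst, pointer, 0, value);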
struct WasmI64AtomicRmw16SubU : public Instruction {
static constexpr WasmOpcodeID opcodeID = wasm_i64_atomic_rmw16_sub_u;
static constexpr size_t length = 5;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister pointer, unsigned offset, VirtualRegister value)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, dst, pointer, offset, value);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister pointer, unsigned offset, VirtualRegister value)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, dst, pointer, offset, value);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister pointer, unsigned offset, VirtualRegister value)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, dst, pointer, offset, value))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, dst, pointer, offset, value))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, dst, pointer, offset, value);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& dst, VirtualRegister& pointer, unsigned& offset, VirtualRegister& value)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<WasmOpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(dst)
&& Fits<VirtualRegister, __size>::check(pointer)
&& Fits<unsigned, __size>::check(offset)
&& Fits<VirtualRegister, __size>::check(value)
&& (__size == OpcodeSize::Wide16 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister pointer, unsigned offset, VirtualRegister value)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, dst, pointer, offset, value)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide32));
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(dst));
gen->write(Fits<VirtualRegister, __size>::convert(pointer));
gen->write(Fits<unsigned, __size>::convert(offset));
gen->write(Fits<VirtualRegister, __size>::convert(value));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**i64_atomic_rmw16_sub_u"[2 - __sizeShiftAmount]);
dumper->dumpOperand("dst", m_dst, true);
dumper->dumpOperand("pointer", m_pointer, false);
dumper->dumpOperand("offset", m_offset, false);
dumper->dumpOperand("value", m_value, false);
}
WasmI64AtomicRmw16SubU(const uint8_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_pointer(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[1]))
, m_offset(Fits<unsigned, OpcodeSize::Narrow>::convert(stream[2]))
, m_value(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[3]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmI64AtomicRmw16SubU(const uint16_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_pointer(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[1]))
, m_offset(Fits<unsigned, OpcodeSize::Wide16>::convert(stream[2]))
, m_value(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[3]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmI64AtomicRmw16SubU(const uint32_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_pointer(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[1]))
, m_offset(Fits<unsigned, OpcodeSize::Wide32>::convert(stream[2]))
, m_value(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[3]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static WasmI64AtomicRmw16SubU decode(const uint8_t* stream)
{
        // Construct from a pointer to the first operand; the opcode byte and any wide prefix are skipped here.
if (*stream == wasm_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == wasm_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide16>(value, func);
else
setDst<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setPointer(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setPointer<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setPointer<OpcodeSize::Wide16>(value, func);
else
setPointer<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setPointer(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setOffset(unsigned value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setOffset<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setOffset<OpcodeSize::Wide16>(value, func);
else
setOffset<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setOffset(unsigned value, Functor func)
{
if (!Fits<unsigned, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 2 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<unsigned, size>::convert(value);
}
template<typename Functor>
void setValue(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setValue<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setValue<OpcodeSize::Wide16>(value, func);
else
setValue<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setValue(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 3 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
VirtualRegister m_dst;
VirtualRegister m_pointer;
unsigned m_offset;
VirtualRegister m_value;
};
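// Operand-patching sketch: the set* members rewrite an operand in the instruction
// stream in place, at whatever width the surrounding opcode was encoded with. If
// the new value does not fit that width, the functor is invoked to supply a
// substitute that does. The calling convention and the 'fallbackOffset' value
// below are illustrative assumptions, not part of the generated contract:
//
//     instruction->setOffset(newOffset, [&]() -> unsigned {
//         // Supply a value known to fit the instruction's existing encoding.
//         return fallbackOffset;
//     });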
struct WasmI64AtomicRmw32SubU : public Instruction {
static constexpr WasmOpcodeID opcodeID = wasm_i64_atomic_rmw32_sub_u;
static constexpr size_t length = 5;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister pointer, unsigned offset, VirtualRegister value)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, dst, pointer, offset, value);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister pointer, unsigned offset, VirtualRegister value)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, dst, pointer, offset, value);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister pointer, unsigned offset, VirtualRegister value)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, dst, pointer, offset, value))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, dst, pointer, offset, value))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, dst, pointer, offset, value);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& dst, VirtualRegister& pointer, unsigned& offset, VirtualRegister& value)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<WasmOpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(dst)
&& Fits<VirtualRegister, __size>::check(pointer)
&& Fits<unsigned, __size>::check(offset)
&& Fits<VirtualRegister, __size>::check(value)
&& (__size == OpcodeSize::Wide16 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister pointer, unsigned offset, VirtualRegister value)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, dst, pointer, offset, value)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide32));
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(dst));
gen->write(Fits<VirtualRegister, __size>::convert(pointer));
gen->write(Fits<unsigned, __size>::convert(offset));
gen->write(Fits<VirtualRegister, __size>::convert(value));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**i64_atomic_rmw32_sub_u"[2 - __sizeShiftAmount]);
dumper->dumpOperand("dst", m_dst, true);
dumper->dumpOperand("pointer", m_pointer, false);
dumper->dumpOperand("offset", m_offset, false);
dumper->dumpOperand("value", m_value, false);
}
WasmI64AtomicRmw32SubU(const uint8_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_pointer(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[1]))
, m_offset(Fits<unsigned, OpcodeSize::Narrow>::convert(stream[2]))
, m_value(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[3]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmI64AtomicRmw32SubU(const uint16_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_pointer(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[1]))
, m_offset(Fits<unsigned, OpcodeSize::Wide16>::convert(stream[2]))
, m_value(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[3]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmI64AtomicRmw32SubU(const uint32_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_pointer(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[1]))
, m_offset(Fits<unsigned, OpcodeSize::Wide32>::convert(stream[2]))
, m_value(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[3]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static WasmI64AtomicRmw32SubU decode(const uint8_t* stream)
{
        // Construct from a pointer to the first operand; the opcode byte and any wide prefix are skipped here.
if (*stream == wasm_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == wasm_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide16>(value, func);
else
setDst<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setPointer(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setPointer<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setPointer<OpcodeSize::Wide16>(value, func);
else
setPointer<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setPointer(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setOffset(unsigned value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setOffset<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setOffset<OpcodeSize::Wide16>(value, func);
else
setOffset<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setOffset(unsigned value, Functor func)
{
if (!Fits<unsigned, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 2 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<unsigned, size>::convert(value);
}
template<typename Functor>
void setValue(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setValue<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setValue<OpcodeSize::Wide16>(value, func);
else
setValue<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setValue(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 3 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
VirtualRegister m_dst;
VirtualRegister m_pointer;
unsigned m_offset;
VirtualRegister m_value;
};
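// Stream layout implied by decode() and emitImpl() for these length-5
// instructions (one opcode slot plus four operand slots; alignment padding
// emitted by alignWideOpcode16/32 precedes the prefixed forms):
//
//     Narrow:  [opcode][dst][pointer][offset][value]                 1 byte per slot
//     Wide16:  [wasm_wide16][opcode][dst][pointer][offset][value]    2-byte operands
//     Wide32:  [wasm_wide32][opcode][dst][pointer][offset][value]    4-byte operands
//
// The prefix and the opcode are always single bytes, which is why checkImpl()
// also verifies that wasm_wide16/wasm_wide32 themselves fit in a Narrow slot.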
struct WasmI64AtomicRmwAnd : public Instruction {
static constexpr WasmOpcodeID opcodeID = wasm_i64_atomic_rmw_and;
static constexpr size_t length = 5;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister pointer, unsigned offset, VirtualRegister value)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, dst, pointer, offset, value);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister pointer, unsigned offset, VirtualRegister value)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, dst, pointer, offset, value);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister pointer, unsigned offset, VirtualRegister value)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, dst, pointer, offset, value))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, dst, pointer, offset, value))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, dst, pointer, offset, value);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& dst, VirtualRegister& pointer, unsigned& offset, VirtualRegister& value)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<WasmOpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(dst)
&& Fits<VirtualRegister, __size>::check(pointer)
&& Fits<unsigned, __size>::check(offset)
&& Fits<VirtualRegister, __size>::check(value)
&& (__size == OpcodeSize::Wide16 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister pointer, unsigned offset, VirtualRegister value)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, dst, pointer, offset, value)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide32));
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(dst));
gen->write(Fits<VirtualRegister, __size>::convert(pointer));
gen->write(Fits<unsigned, __size>::convert(offset));
gen->write(Fits<VirtualRegister, __size>::convert(value));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**i64_atomic_rmw_and"[2 - __sizeShiftAmount]);
dumper->dumpOperand("dst", m_dst, true);
dumper->dumpOperand("pointer", m_pointer, false);
dumper->dumpOperand("offset", m_offset, false);
dumper->dumpOperand("value", m_value, false);
}
WasmI64AtomicRmwAnd(const uint8_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_pointer(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[1]))
, m_offset(Fits<unsigned, OpcodeSize::Narrow>::convert(stream[2]))
, m_value(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[3]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmI64AtomicRmwAnd(const uint16_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_pointer(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[1]))
, m_offset(Fits<unsigned, OpcodeSize::Wide16>::convert(stream[2]))
, m_value(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[3]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmI64AtomicRmwAnd(const uint32_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_pointer(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[1]))
, m_offset(Fits<unsigned, OpcodeSize::Wide32>::convert(stream[2]))
, m_value(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[3]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static WasmI64AtomicRmwAnd decode(const uint8_t* stream)
{
        // Construct from a pointer to the first operand; the opcode byte and any wide prefix are skipped here.
if (*stream == wasm_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == wasm_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide16>(value, func);
else
setDst<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setPointer(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setPointer<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setPointer<OpcodeSize::Wide16>(value, func);
else
setPointer<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setPointer(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setOffset(unsigned value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setOffset<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setOffset<OpcodeSize::Wide16>(value, func);
else
setOffset<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setOffset(unsigned value, Functor func)
{
if (!Fits<unsigned, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 2 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<unsigned, size>::convert(value);
}
template<typename Functor>
void setValue(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setValue<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setValue<OpcodeSize::Wide16>(value, func);
else
setValue<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setValue(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 3 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
VirtualRegister m_dst;
VirtualRegister m_pointer;
unsigned m_offset;
VirtualRegister m_value;
};
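// Fits<T, size> is the single gate behind all of the checks above: check()
// answers whether a value survives narrowing to the slot width, and convert()
// performs that narrowing when emitting. A minimal illustrative probe (the
// local 'offset' is hypothetical):
//
//     unsigned offset = 1024;
//     bool narrowOK = Fits<unsigned, OpcodeSize::Narrow>::check(offset); // false: exceeds one byte
//     bool wide16OK = Fits<unsigned, OpcodeSize::Wide16>::check(offset); // true: fits in two bytes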
struct WasmI64AtomicRmw8AndU : public Instruction {
static constexpr WasmOpcodeID opcodeID = wasm_i64_atomic_rmw8_and_u;
static constexpr size_t length = 5;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister pointer, unsigned offset, VirtualRegister value)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, dst, pointer, offset, value);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister pointer, unsigned offset, VirtualRegister value)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, dst, pointer, offset, value);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister pointer, unsigned offset, VirtualRegister value)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, dst, pointer, offset, value))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, dst, pointer, offset, value))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, dst, pointer, offset, value);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& dst, VirtualRegister& pointer, unsigned& offset, VirtualRegister& value)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<WasmOpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(dst)
&& Fits<VirtualRegister, __size>::check(pointer)
&& Fits<unsigned, __size>::check(offset)
&& Fits<VirtualRegister, __size>::check(value)
&& (__size == OpcodeSize::Wide16 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister pointer, unsigned offset, VirtualRegister value)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, dst, pointer, offset, value)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide32));
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(dst));
gen->write(Fits<VirtualRegister, __size>::convert(pointer));
gen->write(Fits<unsigned, __size>::convert(offset));
gen->write(Fits<VirtualRegister, __size>::convert(value));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**i64_atomic_rmw8_and_u"[2 - __sizeShiftAmount]);
dumper->dumpOperand("dst", m_dst, true);
dumper->dumpOperand("pointer", m_pointer, false);
dumper->dumpOperand("offset", m_offset, false);
dumper->dumpOperand("value", m_value, false);
}
WasmI64AtomicRmw8AndU(const uint8_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_pointer(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[1]))
, m_offset(Fits<unsigned, OpcodeSize::Narrow>::convert(stream[2]))
, m_value(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[3]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmI64AtomicRmw8AndU(const uint16_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_pointer(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[1]))
, m_offset(Fits<unsigned, OpcodeSize::Wide16>::convert(stream[2]))
, m_value(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[3]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmI64AtomicRmw8AndU(const uint32_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_pointer(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[1]))
, m_offset(Fits<unsigned, OpcodeSize::Wide32>::convert(stream[2]))
, m_value(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[3]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static WasmI64AtomicRmw8AndU decode(const uint8_t* stream)
{
        // Construct from a pointer to the first operand; the opcode byte and any wide prefix are skipped here.
if (*stream == wasm_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == wasm_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide16>(value, func);
else
setDst<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setPointer(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setPointer<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setPointer<OpcodeSize::Wide16>(value, func);
else
setPointer<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setPointer(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setOffset(unsigned value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setOffset<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setOffset<OpcodeSize::Wide16>(value, func);
else
setOffset<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setOffset(unsigned value, Functor func)
{
if (!Fits<unsigned, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 2 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<unsigned, size>::convert(value);
}
template<typename Functor>
void setValue(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setValue<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setValue<OpcodeSize::Wide16>(value, func);
else
setValue<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setValue(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 3 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
VirtualRegister m_dst;
VirtualRegister m_pointer;
unsigned m_offset;
VirtualRegister m_value;
};
struct WasmI64AtomicRmw16AndU : public Instruction {
static constexpr WasmOpcodeID opcodeID = wasm_i64_atomic_rmw16_and_u;
static constexpr size_t length = 5;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister pointer, unsigned offset, VirtualRegister value)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, dst, pointer, offset, value);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister pointer, unsigned offset, VirtualRegister value)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, dst, pointer, offset, value);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister pointer, unsigned offset, VirtualRegister value)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, dst, pointer, offset, value))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, dst, pointer, offset, value))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, dst, pointer, offset, value);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& dst, VirtualRegister& pointer, unsigned& offset, VirtualRegister& value)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<WasmOpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(dst)
&& Fits<VirtualRegister, __size>::check(pointer)
&& Fits<unsigned, __size>::check(offset)
&& Fits<VirtualRegister, __size>::check(value)
&& (__size == OpcodeSize::Wide16 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister pointer, unsigned offset, VirtualRegister value)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, dst, pointer, offset, value)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide32));
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(dst));
gen->write(Fits<VirtualRegister, __size>::convert(pointer));
gen->write(Fits<unsigned, __size>::convert(offset));
gen->write(Fits<VirtualRegister, __size>::convert(value));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**i64_atomic_rmw16_and_u"[2 - __sizeShiftAmount]);
dumper->dumpOperand("dst", m_dst, true);
dumper->dumpOperand("pointer", m_pointer, false);
dumper->dumpOperand("offset", m_offset, false);
dumper->dumpOperand("value", m_value, false);
}
WasmI64AtomicRmw16AndU(const uint8_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_pointer(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[1]))
, m_offset(Fits<unsigned, OpcodeSize::Narrow>::convert(stream[2]))
, m_value(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[3]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmI64AtomicRmw16AndU(const uint16_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_pointer(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[1]))
, m_offset(Fits<unsigned, OpcodeSize::Wide16>::convert(stream[2]))
, m_value(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[3]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmI64AtomicRmw16AndU(const uint32_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_pointer(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[1]))
, m_offset(Fits<unsigned, OpcodeSize::Wide32>::convert(stream[2]))
, m_value(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[3]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static WasmI64AtomicRmw16AndU decode(const uint8_t* stream)
{
        // Construct from a pointer to the first operand; the opcode byte and any wide prefix are skipped here.
if (*stream == wasm_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == wasm_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide16>(value, func);
else
setDst<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setPointer(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setPointer<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setPointer<OpcodeSize::Wide16>(value, func);
else
setPointer<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setPointer(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setOffset(unsigned value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setOffset<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setOffset<OpcodeSize::Wide16>(value, func);
else
setOffset<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setOffset(unsigned value, Functor func)
{
if (!Fits<unsigned, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 2 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<unsigned, size>::convert(value);
}
template<typename Functor>
void setValue(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setValue<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setValue<OpcodeSize::Wide16>(value, func);
else
setValue<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setValue(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 3 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
VirtualRegister m_dst;
VirtualRegister m_pointer;
unsigned m_offset;
VirtualRegister m_value;
};
struct WasmI64AtomicRmw32AndU : public Instruction {
static constexpr WasmOpcodeID opcodeID = wasm_i64_atomic_rmw32_and_u;
static constexpr size_t length = 5;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister pointer, unsigned offset, VirtualRegister value)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, dst, pointer, offset, value);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister pointer, unsigned offset, VirtualRegister value)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, dst, pointer, offset, value);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister pointer, unsigned offset, VirtualRegister value)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, dst, pointer, offset, value))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, dst, pointer, offset, value))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, dst, pointer, offset, value);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& dst, VirtualRegister& pointer, unsigned& offset, VirtualRegister& value)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<WasmOpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(dst)
&& Fits<VirtualRegister, __size>::check(pointer)
&& Fits<unsigned, __size>::check(offset)
&& Fits<VirtualRegister, __size>::check(value)
&& (__size == OpcodeSize::Wide16 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister pointer, unsigned offset, VirtualRegister value)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, dst, pointer, offset, value)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide32));
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(dst));
gen->write(Fits<VirtualRegister, __size>::convert(pointer));
gen->write(Fits<unsigned, __size>::convert(offset));
gen->write(Fits<VirtualRegister, __size>::convert(value));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**i64_atomic_rmw32_and_u"[2 - __sizeShiftAmount]);
dumper->dumpOperand("dst", m_dst, true);
dumper->dumpOperand("pointer", m_pointer, false);
dumper->dumpOperand("offset", m_offset, false);
dumper->dumpOperand("value", m_value, false);
}
WasmI64AtomicRmw32AndU(const uint8_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_pointer(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[1]))
, m_offset(Fits<unsigned, OpcodeSize::Narrow>::convert(stream[2]))
, m_value(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[3]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmI64AtomicRmw32AndU(const uint16_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_pointer(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[1]))
, m_offset(Fits<unsigned, OpcodeSize::Wide16>::convert(stream[2]))
, m_value(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[3]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmI64AtomicRmw32AndU(const uint32_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_pointer(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[1]))
, m_offset(Fits<unsigned, OpcodeSize::Wide32>::convert(stream[2]))
, m_value(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[3]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static WasmI64AtomicRmw32AndU decode(const uint8_t* stream)
{
        // Construct from a pointer to the first operand; the opcode byte and any wide prefix are skipped here.
if (*stream == wasm_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == wasm_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide16>(value, func);
else
setDst<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setPointer(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setPointer<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setPointer<OpcodeSize::Wide16>(value, func);
else
setPointer<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setPointer(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setOffset(unsigned value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setOffset<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setOffset<OpcodeSize::Wide16>(value, func);
else
setOffset<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setOffset(unsigned value, Functor func)
{
if (!Fits<unsigned, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 2 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<unsigned, size>::convert(value);
}
template<typename Functor>
void setValue(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setValue<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setValue<OpcodeSize::Wide16>(value, func);
else
setValue<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setValue(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 3 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
VirtualRegister m_dst;
VirtualRegister m_pointer;
unsigned m_offset;
VirtualRegister m_value;
};
struct WasmI64AtomicRmwOr : public Instruction {
static constexpr WasmOpcodeID opcodeID = wasm_i64_atomic_rmw_or;
static constexpr size_t length = 5;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister pointer, unsigned offset, VirtualRegister value)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, dst, pointer, offset, value);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister pointer, unsigned offset, VirtualRegister value)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, dst, pointer, offset, value);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
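    // Tries the narrowest encoding first, widening only when an operand does
    // not fit; the final Wide32 attempt asserts since it must always succeed.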
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister pointer, unsigned offset, VirtualRegister value)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, dst, pointer, offset, value))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, dst, pointer, offset, value))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, dst, pointer, offset, value);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& dst, VirtualRegister& pointer, unsigned& offset, VirtualRegister& value)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
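        // An encoding is viable only if the opcode and every operand fit in
        // __size and, for wide forms, the prefix itself fits a narrow slot.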
return Fits<WasmOpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(dst)
&& Fits<VirtualRegister, __size>::check(pointer)
&& Fits<unsigned, __size>::check(offset)
&& Fits<VirtualRegister, __size>::check(value)
&& (__size == OpcodeSize::Wide16 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister pointer, unsigned offset, VirtualRegister value)
{
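        // Wide forms are aligned up front so multi-byte operands can land on
        // natural boundaries on targets that require aligned accesses.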
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, dst, pointer, offset, value)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide32));
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(dst));
gen->write(Fits<VirtualRegister, __size>::convert(pointer));
gen->write(Fits<unsigned, __size>::convert(offset));
gen->write(Fits<VirtualRegister, __size>::convert(value));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
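        // __sizeShiftAmount is 0/1/2 for narrow/wide16/wide32; indexing the
        // "**"-prefixed literal prepends one '*' per widening step.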
dumper->printLocationAndOp(__location, &"**i64_atomic_rmw_or"[2 - __sizeShiftAmount]);
dumper->dumpOperand("dst", m_dst, true);
dumper->dumpOperand("pointer", m_pointer, false);
dumper->dumpOperand("offset", m_offset, false);
dumper->dumpOperand("value", m_value, false);
}
WasmI64AtomicRmwOr(const uint8_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_pointer(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[1]))
, m_offset(Fits<unsigned, OpcodeSize::Narrow>::convert(stream[2]))
, m_value(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[3]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmI64AtomicRmwOr(const uint16_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_pointer(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[1]))
, m_offset(Fits<unsigned, OpcodeSize::Wide16>::convert(stream[2]))
, m_value(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[3]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmI64AtomicRmwOr(const uint32_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_pointer(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[1]))
, m_offset(Fits<unsigned, OpcodeSize::Wide32>::convert(stream[2]))
, m_value(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[3]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static WasmI64AtomicRmwOr decode(const uint8_t* stream)
{
        // `stream` points at the wide prefix or opcode byte; the constructor
        // receives a pointer to the first operand, past the opcode and prefix.
if (*stream == wasm_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == wasm_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide16>(value, func);
else
setDst<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setDst(VirtualRegister value, Functor func)
{
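        // If the replacement register does not fit this instruction's encoded
        // size, the caller-supplied functor must produce one that does.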
if (!Fits<VirtualRegister, size>::check(value))
value = func();
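        // Operand layout is [prefix?][opcode][op0][op1]...: PaddingBySize
        // covers the prefix byte and operand i begins i * size bytes past the
        // 1-byte opcode. The other set* helpers below use the same arithmetic.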
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setPointer(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setPointer<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setPointer<OpcodeSize::Wide16>(value, func);
else
setPointer<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setPointer(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setOffset(unsigned value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setOffset<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setOffset<OpcodeSize::Wide16>(value, func);
else
setOffset<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setOffset(unsigned value, Functor func)
{
if (!Fits<unsigned, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 2 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<unsigned, size>::convert(value);
}
template<typename Functor>
void setValue(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setValue<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setValue<OpcodeSize::Wide16>(value, func);
else
setValue<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setValue(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 3 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
VirtualRegister m_dst;
VirtualRegister m_pointer;
unsigned m_offset;
VirtualRegister m_value;
};
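// Illustrative sketch, not generator output: decoding one of these entries
// from a raw instruction stream. `instructionBytes` is a hypothetical pointer
// to the first byte (wide prefix or opcode) of an already-emitted
// wasm_i64_atomic_rmw_or instruction.
//
//     auto op = WasmI64AtomicRmwOr::decode(instructionBytes);
//     VirtualRegister dst = op.m_dst;   // operands are widened on decode
//     unsigned offset = op.m_offset;    // constant added to op.m_pointer
//
// The structs below repeat this shape verbatim, so the commentary in
// WasmI64AtomicRmwOr applies to each of them.
// i64.atomic.rmw8.or_u: same operation on an 8-bit cell; the old byte is zero-extended into dst.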
struct WasmI64AtomicRmw8OrU : public Instruction {
static constexpr WasmOpcodeID opcodeID = wasm_i64_atomic_rmw8_or_u;
static constexpr size_t length = 5;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister pointer, unsigned offset, VirtualRegister value)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, dst, pointer, offset, value);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister pointer, unsigned offset, VirtualRegister value)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, dst, pointer, offset, value);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister pointer, unsigned offset, VirtualRegister value)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, dst, pointer, offset, value))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, dst, pointer, offset, value))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, dst, pointer, offset, value);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& dst, VirtualRegister& pointer, unsigned& offset, VirtualRegister& value)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<WasmOpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(dst)
&& Fits<VirtualRegister, __size>::check(pointer)
&& Fits<unsigned, __size>::check(offset)
&& Fits<VirtualRegister, __size>::check(value)
&& (__size == OpcodeSize::Wide16 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister pointer, unsigned offset, VirtualRegister value)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, dst, pointer, offset, value)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide32));
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(dst));
gen->write(Fits<VirtualRegister, __size>::convert(pointer));
gen->write(Fits<unsigned, __size>::convert(offset));
gen->write(Fits<VirtualRegister, __size>::convert(value));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**i64_atomic_rmw8_or_u"[2 - __sizeShiftAmount]);
dumper->dumpOperand("dst", m_dst, true);
dumper->dumpOperand("pointer", m_pointer, false);
dumper->dumpOperand("offset", m_offset, false);
dumper->dumpOperand("value", m_value, false);
}
WasmI64AtomicRmw8OrU(const uint8_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_pointer(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[1]))
, m_offset(Fits<unsigned, OpcodeSize::Narrow>::convert(stream[2]))
, m_value(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[3]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmI64AtomicRmw8OrU(const uint16_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_pointer(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[1]))
, m_offset(Fits<unsigned, OpcodeSize::Wide16>::convert(stream[2]))
, m_value(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[3]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmI64AtomicRmw8OrU(const uint32_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_pointer(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[1]))
, m_offset(Fits<unsigned, OpcodeSize::Wide32>::convert(stream[2]))
, m_value(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[3]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static WasmI64AtomicRmw8OrU decode(const uint8_t* stream)
{
        // `stream` points at the wide prefix or opcode byte; the constructor
        // receives a pointer to the first operand, past the opcode and prefix.
if (*stream == wasm_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == wasm_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide16>(value, func);
else
setDst<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setPointer(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setPointer<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setPointer<OpcodeSize::Wide16>(value, func);
else
setPointer<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setPointer(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setOffset(unsigned value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setOffset<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setOffset<OpcodeSize::Wide16>(value, func);
else
setOffset<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setOffset(unsigned value, Functor func)
{
if (!Fits<unsigned, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 2 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<unsigned, size>::convert(value);
}
template<typename Functor>
void setValue(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setValue<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setValue<OpcodeSize::Wide16>(value, func);
else
setValue<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setValue(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 3 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
VirtualRegister m_dst;
VirtualRegister m_pointer;
unsigned m_offset;
VirtualRegister m_value;
};
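// i64.atomic.rmw16.or_u: atomic OR on a 16-bit cell; the old halfword is zero-extended into dst.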
struct WasmI64AtomicRmw16OrU : public Instruction {
static constexpr WasmOpcodeID opcodeID = wasm_i64_atomic_rmw16_or_u;
static constexpr size_t length = 5;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister pointer, unsigned offset, VirtualRegister value)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, dst, pointer, offset, value);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister pointer, unsigned offset, VirtualRegister value)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, dst, pointer, offset, value);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister pointer, unsigned offset, VirtualRegister value)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, dst, pointer, offset, value))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, dst, pointer, offset, value))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, dst, pointer, offset, value);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& dst, VirtualRegister& pointer, unsigned& offset, VirtualRegister& value)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<WasmOpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(dst)
&& Fits<VirtualRegister, __size>::check(pointer)
&& Fits<unsigned, __size>::check(offset)
&& Fits<VirtualRegister, __size>::check(value)
&& (__size == OpcodeSize::Wide16 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister pointer, unsigned offset, VirtualRegister value)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, dst, pointer, offset, value)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide32));
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(dst));
gen->write(Fits<VirtualRegister, __size>::convert(pointer));
gen->write(Fits<unsigned, __size>::convert(offset));
gen->write(Fits<VirtualRegister, __size>::convert(value));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**i64_atomic_rmw16_or_u"[2 - __sizeShiftAmount]);
dumper->dumpOperand("dst", m_dst, true);
dumper->dumpOperand("pointer", m_pointer, false);
dumper->dumpOperand("offset", m_offset, false);
dumper->dumpOperand("value", m_value, false);
}
WasmI64AtomicRmw16OrU(const uint8_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_pointer(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[1]))
, m_offset(Fits<unsigned, OpcodeSize::Narrow>::convert(stream[2]))
, m_value(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[3]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmI64AtomicRmw16OrU(const uint16_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_pointer(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[1]))
, m_offset(Fits<unsigned, OpcodeSize::Wide16>::convert(stream[2]))
, m_value(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[3]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmI64AtomicRmw16OrU(const uint32_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_pointer(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[1]))
, m_offset(Fits<unsigned, OpcodeSize::Wide32>::convert(stream[2]))
, m_value(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[3]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static WasmI64AtomicRmw16OrU decode(const uint8_t* stream)
{
        // `stream` points at the wide prefix or opcode byte; the constructor
        // receives a pointer to the first operand, past the opcode and prefix.
if (*stream == wasm_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == wasm_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide16>(value, func);
else
setDst<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setPointer(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setPointer<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setPointer<OpcodeSize::Wide16>(value, func);
else
setPointer<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setPointer(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setOffset(unsigned value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setOffset<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setOffset<OpcodeSize::Wide16>(value, func);
else
setOffset<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setOffset(unsigned value, Functor func)
{
if (!Fits<unsigned, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 2 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<unsigned, size>::convert(value);
}
template<typename Functor>
void setValue(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setValue<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setValue<OpcodeSize::Wide16>(value, func);
else
setValue<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setValue(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 3 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
VirtualRegister m_dst;
VirtualRegister m_pointer;
unsigned m_offset;
VirtualRegister m_value;
};
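// i64.atomic.rmw32.or_u: atomic OR on a 32-bit cell; the old word is zero-extended into dst.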
struct WasmI64AtomicRmw32OrU : public Instruction {
static constexpr WasmOpcodeID opcodeID = wasm_i64_atomic_rmw32_or_u;
static constexpr size_t length = 5;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister pointer, unsigned offset, VirtualRegister value)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, dst, pointer, offset, value);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister pointer, unsigned offset, VirtualRegister value)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, dst, pointer, offset, value);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister pointer, unsigned offset, VirtualRegister value)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, dst, pointer, offset, value))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, dst, pointer, offset, value))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, dst, pointer, offset, value);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& dst, VirtualRegister& pointer, unsigned& offset, VirtualRegister& value)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<WasmOpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(dst)
&& Fits<VirtualRegister, __size>::check(pointer)
&& Fits<unsigned, __size>::check(offset)
&& Fits<VirtualRegister, __size>::check(value)
&& (__size == OpcodeSize::Wide16 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister pointer, unsigned offset, VirtualRegister value)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, dst, pointer, offset, value)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide32));
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(dst));
gen->write(Fits<VirtualRegister, __size>::convert(pointer));
gen->write(Fits<unsigned, __size>::convert(offset));
gen->write(Fits<VirtualRegister, __size>::convert(value));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**i64_atomic_rmw32_or_u"[2 - __sizeShiftAmount]);
dumper->dumpOperand("dst", m_dst, true);
dumper->dumpOperand("pointer", m_pointer, false);
dumper->dumpOperand("offset", m_offset, false);
dumper->dumpOperand("value", m_value, false);
}
WasmI64AtomicRmw32OrU(const uint8_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_pointer(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[1]))
, m_offset(Fits<unsigned, OpcodeSize::Narrow>::convert(stream[2]))
, m_value(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[3]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmI64AtomicRmw32OrU(const uint16_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_pointer(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[1]))
, m_offset(Fits<unsigned, OpcodeSize::Wide16>::convert(stream[2]))
, m_value(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[3]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmI64AtomicRmw32OrU(const uint32_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_pointer(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[1]))
, m_offset(Fits<unsigned, OpcodeSize::Wide32>::convert(stream[2]))
, m_value(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[3]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static WasmI64AtomicRmw32OrU decode(const uint8_t* stream)
{
        // `stream` points at the wide prefix or opcode byte; the constructor
        // receives a pointer to the first operand, past the opcode and prefix.
if (*stream == wasm_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == wasm_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide16>(value, func);
else
setDst<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setPointer(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setPointer<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setPointer<OpcodeSize::Wide16>(value, func);
else
setPointer<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setPointer(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setOffset(unsigned value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setOffset<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setOffset<OpcodeSize::Wide16>(value, func);
else
setOffset<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setOffset(unsigned value, Functor func)
{
if (!Fits<unsigned, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 2 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<unsigned, size>::convert(value);
}
template<typename Functor>
void setValue(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setValue<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setValue<OpcodeSize::Wide16>(value, func);
else
setValue<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setValue(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 3 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
VirtualRegister m_dst;
VirtualRegister m_pointer;
unsigned m_offset;
VirtualRegister m_value;
};
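// i64.atomic.rmw.xor: atomically XORs value into the 64-bit cell at pointer + offset; the old contents are returned in dst.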
struct WasmI64AtomicRmwXor : public Instruction {
static constexpr WasmOpcodeID opcodeID = wasm_i64_atomic_rmw_xor;
static constexpr size_t length = 5;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister pointer, unsigned offset, VirtualRegister value)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, dst, pointer, offset, value);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister pointer, unsigned offset, VirtualRegister value)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, dst, pointer, offset, value);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister pointer, unsigned offset, VirtualRegister value)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, dst, pointer, offset, value))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, dst, pointer, offset, value))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, dst, pointer, offset, value);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& dst, VirtualRegister& pointer, unsigned& offset, VirtualRegister& value)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<WasmOpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(dst)
&& Fits<VirtualRegister, __size>::check(pointer)
&& Fits<unsigned, __size>::check(offset)
&& Fits<VirtualRegister, __size>::check(value)
&& (__size == OpcodeSize::Wide16 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister pointer, unsigned offset, VirtualRegister value)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, dst, pointer, offset, value)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide32));
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(dst));
gen->write(Fits<VirtualRegister, __size>::convert(pointer));
gen->write(Fits<unsigned, __size>::convert(offset));
gen->write(Fits<VirtualRegister, __size>::convert(value));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**i64_atomic_rmw_xor"[2 - __sizeShiftAmount]);
dumper->dumpOperand("dst", m_dst, true);
dumper->dumpOperand("pointer", m_pointer, false);
dumper->dumpOperand("offset", m_offset, false);
dumper->dumpOperand("value", m_value, false);
}
WasmI64AtomicRmwXor(const uint8_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_pointer(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[1]))
, m_offset(Fits<unsigned, OpcodeSize::Narrow>::convert(stream[2]))
, m_value(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[3]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmI64AtomicRmwXor(const uint16_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_pointer(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[1]))
, m_offset(Fits<unsigned, OpcodeSize::Wide16>::convert(stream[2]))
, m_value(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[3]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmI64AtomicRmwXor(const uint32_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_pointer(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[1]))
, m_offset(Fits<unsigned, OpcodeSize::Wide32>::convert(stream[2]))
, m_value(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[3]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static WasmI64AtomicRmwXor decode(const uint8_t* stream)
{
        // `stream` points at the wide prefix or opcode byte; the constructor
        // receives a pointer to the first operand, past the opcode and prefix.
if (*stream == wasm_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == wasm_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide16>(value, func);
else
setDst<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setPointer(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setPointer<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setPointer<OpcodeSize::Wide16>(value, func);
else
setPointer<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setPointer(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setOffset(unsigned value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setOffset<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setOffset<OpcodeSize::Wide16>(value, func);
else
setOffset<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setOffset(unsigned value, Functor func)
{
if (!Fits<unsigned, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 2 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<unsigned, size>::convert(value);
}
template<typename Functor>
void setValue(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setValue<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setValue<OpcodeSize::Wide16>(value, func);
else
setValue<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setValue(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 3 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
VirtualRegister m_dst;
VirtualRegister m_pointer;
unsigned m_offset;
VirtualRegister m_value;
};
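// i64.atomic.rmw8.xor_u: atomic XOR on an 8-bit cell; the old byte is zero-extended into dst.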
struct WasmI64AtomicRmw8XorU : public Instruction {
static constexpr WasmOpcodeID opcodeID = wasm_i64_atomic_rmw8_xor_u;
static constexpr size_t length = 5;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister pointer, unsigned offset, VirtualRegister value)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, dst, pointer, offset, value);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister pointer, unsigned offset, VirtualRegister value)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, dst, pointer, offset, value);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister pointer, unsigned offset, VirtualRegister value)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, dst, pointer, offset, value))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, dst, pointer, offset, value))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, dst, pointer, offset, value);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& dst, VirtualRegister& pointer, unsigned& offset, VirtualRegister& value)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<WasmOpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(dst)
&& Fits<VirtualRegister, __size>::check(pointer)
&& Fits<unsigned, __size>::check(offset)
&& Fits<VirtualRegister, __size>::check(value)
&& (__size == OpcodeSize::Wide16 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister pointer, unsigned offset, VirtualRegister value)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, dst, pointer, offset, value)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide32));
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(dst));
gen->write(Fits<VirtualRegister, __size>::convert(pointer));
gen->write(Fits<unsigned, __size>::convert(offset));
gen->write(Fits<VirtualRegister, __size>::convert(value));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**i64_atomic_rmw8_xor_u"[2 - __sizeShiftAmount]);
dumper->dumpOperand("dst", m_dst, true);
dumper->dumpOperand("pointer", m_pointer, false);
dumper->dumpOperand("offset", m_offset, false);
dumper->dumpOperand("value", m_value, false);
}
WasmI64AtomicRmw8XorU(const uint8_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_pointer(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[1]))
, m_offset(Fits<unsigned, OpcodeSize::Narrow>::convert(stream[2]))
, m_value(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[3]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmI64AtomicRmw8XorU(const uint16_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_pointer(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[1]))
, m_offset(Fits<unsigned, OpcodeSize::Wide16>::convert(stream[2]))
, m_value(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[3]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmI64AtomicRmw8XorU(const uint32_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_pointer(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[1]))
, m_offset(Fits<unsigned, OpcodeSize::Wide32>::convert(stream[2]))
, m_value(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[3]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static WasmI64AtomicRmw8XorU decode(const uint8_t* stream)
{
        // `stream` points at the wide prefix or opcode byte; the constructor
        // receives a pointer to the first operand, past the opcode and prefix.
if (*stream == wasm_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == wasm_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide16>(value, func);
else
setDst<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setPointer(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setPointer<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setPointer<OpcodeSize::Wide16>(value, func);
else
setPointer<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setPointer(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setOffset(unsigned value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setOffset<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setOffset<OpcodeSize::Wide16>(value, func);
else
setOffset<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setOffset(unsigned value, Functor func)
{
if (!Fits<unsigned, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 2 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<unsigned, size>::convert(value);
}
template<typename Functor>
void setValue(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setValue<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setValue<OpcodeSize::Wide16>(value, func);
else
setValue<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setValue(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 3 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
VirtualRegister m_dst;
VirtualRegister m_pointer;
unsigned m_offset;
VirtualRegister m_value;
};
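// i64.atomic.rmw16.xor_u: atomic XOR on a 16-bit cell; the old halfword is zero-extended into dst.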
struct WasmI64AtomicRmw16XorU : public Instruction {
static constexpr WasmOpcodeID opcodeID = wasm_i64_atomic_rmw16_xor_u;
static constexpr size_t length = 5;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister pointer, unsigned offset, VirtualRegister value)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, dst, pointer, offset, value);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister pointer, unsigned offset, VirtualRegister value)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, dst, pointer, offset, value);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister pointer, unsigned offset, VirtualRegister value)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, dst, pointer, offset, value))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, dst, pointer, offset, value))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, dst, pointer, offset, value);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& dst, VirtualRegister& pointer, unsigned& offset, VirtualRegister& value)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<WasmOpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(dst)
&& Fits<VirtualRegister, __size>::check(pointer)
&& Fits<unsigned, __size>::check(offset)
&& Fits<VirtualRegister, __size>::check(value)
&& (__size == OpcodeSize::Wide16 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister pointer, unsigned offset, VirtualRegister value)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, dst, pointer, offset, value)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide32));
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(dst));
gen->write(Fits<VirtualRegister, __size>::convert(pointer));
gen->write(Fits<unsigned, __size>::convert(offset));
gen->write(Fits<VirtualRegister, __size>::convert(value));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**i64_atomic_rmw16_xor_u"[2 - __sizeShiftAmount]);
dumper->dumpOperand("dst", m_dst, true);
dumper->dumpOperand("pointer", m_pointer, false);
dumper->dumpOperand("offset", m_offset, false);
dumper->dumpOperand("value", m_value, false);
}
WasmI64AtomicRmw16XorU(const uint8_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_pointer(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[1]))
, m_offset(Fits<unsigned, OpcodeSize::Narrow>::convert(stream[2]))
, m_value(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[3]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmI64AtomicRmw16XorU(const uint16_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_pointer(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[1]))
, m_offset(Fits<unsigned, OpcodeSize::Wide16>::convert(stream[2]))
, m_value(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[3]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmI64AtomicRmw16XorU(const uint32_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_pointer(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[1]))
, m_offset(Fits<unsigned, OpcodeSize::Wide32>::convert(stream[2]))
, m_value(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[3]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static WasmI64AtomicRmw16XorU decode(const uint8_t* stream)
{
        // `stream` points at the wide prefix or opcode byte; the constructor
        // receives a pointer to the first operand, past the opcode and prefix.
if (*stream == wasm_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == wasm_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide16>(value, func);
else
setDst<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setPointer(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setPointer<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setPointer<OpcodeSize::Wide16>(value, func);
else
setPointer<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setPointer(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setOffset(unsigned value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setOffset<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setOffset<OpcodeSize::Wide16>(value, func);
else
setOffset<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setOffset(unsigned value, Functor func)
{
if (!Fits<unsigned, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 2 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<unsigned, size>::convert(value);
}
template<typename Functor>
void setValue(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setValue<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setValue<OpcodeSize::Wide16>(value, func);
else
setValue<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setValue(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 3 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
VirtualRegister m_dst;
VirtualRegister m_pointer;
unsigned m_offset;
VirtualRegister m_value;
};
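// i64.atomic.rmw32.xor_u: atomic XOR on a 32-bit cell; the old word is zero-extended into dst.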
struct WasmI64AtomicRmw32XorU : public Instruction {
static constexpr WasmOpcodeID opcodeID = wasm_i64_atomic_rmw32_xor_u;
static constexpr size_t length = 5;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister pointer, unsigned offset, VirtualRegister value)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, dst, pointer, offset, value);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister pointer, unsigned offset, VirtualRegister value)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, dst, pointer, offset, value);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister pointer, unsigned offset, VirtualRegister value)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, dst, pointer, offset, value))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, dst, pointer, offset, value))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, dst, pointer, offset, value);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& dst, VirtualRegister& pointer, unsigned& offset, VirtualRegister& value)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<WasmOpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(dst)
&& Fits<VirtualRegister, __size>::check(pointer)
&& Fits<unsigned, __size>::check(offset)
&& Fits<VirtualRegister, __size>::check(value)
&& (__size == OpcodeSize::Wide16 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister pointer, unsigned offset, VirtualRegister value)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, dst, pointer, offset, value)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide32));
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(dst));
gen->write(Fits<VirtualRegister, __size>::convert(pointer));
gen->write(Fits<unsigned, __size>::convert(offset));
gen->write(Fits<VirtualRegister, __size>::convert(value));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**i64_atomic_rmw32_xor_u"[2 - __sizeShiftAmount]);
dumper->dumpOperand("dst", m_dst, true);
dumper->dumpOperand("pointer", m_pointer, false);
dumper->dumpOperand("offset", m_offset, false);
dumper->dumpOperand("value", m_value, false);
}
WasmI64AtomicRmw32XorU(const uint8_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_pointer(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[1]))
, m_offset(Fits<unsigned, OpcodeSize::Narrow>::convert(stream[2]))
, m_value(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[3]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmI64AtomicRmw32XorU(const uint16_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_pointer(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[1]))
, m_offset(Fits<unsigned, OpcodeSize::Wide16>::convert(stream[2]))
, m_value(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[3]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmI64AtomicRmw32XorU(const uint32_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_pointer(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[1]))
, m_offset(Fits<unsigned, OpcodeSize::Wide32>::convert(stream[2]))
, m_value(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[3]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static WasmI64AtomicRmw32XorU decode(const uint8_t* stream)
{
        // The constructors take a pointer to the first operand, so skip the wide prefix (if any) and the opcode byte.
if (*stream == wasm_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == wasm_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide16>(value, func);
else
setDst<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setPointer(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setPointer<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setPointer<OpcodeSize::Wide16>(value, func);
else
setPointer<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setPointer(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setOffset(unsigned value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setOffset<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setOffset<OpcodeSize::Wide16>(value, func);
else
setOffset<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setOffset(unsigned value, Functor func)
{
if (!Fits<unsigned, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 2 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<unsigned, size>::convert(value);
}
template<typename Functor>
void setValue(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setValue<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setValue<OpcodeSize::Wide16>(value, func);
else
setValue<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setValue(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 3 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
VirtualRegister m_dst;
VirtualRegister m_pointer;
unsigned m_offset;
VirtualRegister m_value;
};
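// Accessor for wasm_i64_atomic_rmw_xchg (Wasm i64.atomic.rmw.xchg): atomically
// swaps the 64-bit value at pointer + offset with value; dst receives the old
// contents.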
struct WasmI64AtomicRmwXchg : public Instruction {
static constexpr WasmOpcodeID opcodeID = wasm_i64_atomic_rmw_xchg;
static constexpr size_t length = 5;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister pointer, unsigned offset, VirtualRegister value)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, dst, pointer, offset, value);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister pointer, unsigned offset, VirtualRegister value)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, dst, pointer, offset, value);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister pointer, unsigned offset, VirtualRegister value)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, dst, pointer, offset, value))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, dst, pointer, offset, value))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, dst, pointer, offset, value);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& dst, VirtualRegister& pointer, unsigned& offset, VirtualRegister& value)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<WasmOpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(dst)
&& Fits<VirtualRegister, __size>::check(pointer)
&& Fits<unsigned, __size>::check(offset)
&& Fits<VirtualRegister, __size>::check(value)
&& (__size == OpcodeSize::Wide16 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister pointer, unsigned offset, VirtualRegister value)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, dst, pointer, offset, value)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide32));
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(dst));
gen->write(Fits<VirtualRegister, __size>::convert(pointer));
gen->write(Fits<unsigned, __size>::convert(offset));
gen->write(Fits<VirtualRegister, __size>::convert(value));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**i64_atomic_rmw_xchg"[2 - __sizeShiftAmount]);
dumper->dumpOperand("dst", m_dst, true);
dumper->dumpOperand("pointer", m_pointer, false);
dumper->dumpOperand("offset", m_offset, false);
dumper->dumpOperand("value", m_value, false);
}
WasmI64AtomicRmwXchg(const uint8_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_pointer(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[1]))
, m_offset(Fits<unsigned, OpcodeSize::Narrow>::convert(stream[2]))
, m_value(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[3]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmI64AtomicRmwXchg(const uint16_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_pointer(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[1]))
, m_offset(Fits<unsigned, OpcodeSize::Wide16>::convert(stream[2]))
, m_value(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[3]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmI64AtomicRmwXchg(const uint32_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_pointer(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[1]))
, m_offset(Fits<unsigned, OpcodeSize::Wide32>::convert(stream[2]))
, m_value(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[3]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static WasmI64AtomicRmwXchg decode(const uint8_t* stream)
{
        // The constructors take a pointer to the first operand, so skip the wide prefix (if any) and the opcode byte.
if (*stream == wasm_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == wasm_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide16>(value, func);
else
setDst<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setPointer(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setPointer<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setPointer<OpcodeSize::Wide16>(value, func);
else
setPointer<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setPointer(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setOffset(unsigned value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setOffset<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setOffset<OpcodeSize::Wide16>(value, func);
else
setOffset<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setOffset(unsigned value, Functor func)
{
if (!Fits<unsigned, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 2 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<unsigned, size>::convert(value);
}
template<typename Functor>
void setValue(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setValue<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setValue<OpcodeSize::Wide16>(value, func);
else
setValue<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setValue(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 3 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
VirtualRegister m_dst;
VirtualRegister m_pointer;
unsigned m_offset;
VirtualRegister m_value;
};
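// Accessor for wasm_i64_atomic_rmw8_xchg_u (Wasm i64.atomic.rmw8.xchg_u):
// atomic 8-bit exchange; the old byte is zero-extended into dst.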
struct WasmI64AtomicRmw8XchgU : public Instruction {
static constexpr WasmOpcodeID opcodeID = wasm_i64_atomic_rmw8_xchg_u;
static constexpr size_t length = 5;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister pointer, unsigned offset, VirtualRegister value)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, dst, pointer, offset, value);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister pointer, unsigned offset, VirtualRegister value)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, dst, pointer, offset, value);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister pointer, unsigned offset, VirtualRegister value)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, dst, pointer, offset, value))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, dst, pointer, offset, value))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, dst, pointer, offset, value);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& dst, VirtualRegister& pointer, unsigned& offset, VirtualRegister& value)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<WasmOpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(dst)
&& Fits<VirtualRegister, __size>::check(pointer)
&& Fits<unsigned, __size>::check(offset)
&& Fits<VirtualRegister, __size>::check(value)
&& (__size == OpcodeSize::Wide16 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister pointer, unsigned offset, VirtualRegister value)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, dst, pointer, offset, value)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide32));
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(dst));
gen->write(Fits<VirtualRegister, __size>::convert(pointer));
gen->write(Fits<unsigned, __size>::convert(offset));
gen->write(Fits<VirtualRegister, __size>::convert(value));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**i64_atomic_rmw8_xchg_u"[2 - __sizeShiftAmount]);
dumper->dumpOperand("dst", m_dst, true);
dumper->dumpOperand("pointer", m_pointer, false);
dumper->dumpOperand("offset", m_offset, false);
dumper->dumpOperand("value", m_value, false);
}
WasmI64AtomicRmw8XchgU(const uint8_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_pointer(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[1]))
, m_offset(Fits<unsigned, OpcodeSize::Narrow>::convert(stream[2]))
, m_value(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[3]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmI64AtomicRmw8XchgU(const uint16_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_pointer(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[1]))
, m_offset(Fits<unsigned, OpcodeSize::Wide16>::convert(stream[2]))
, m_value(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[3]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmI64AtomicRmw8XchgU(const uint32_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_pointer(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[1]))
, m_offset(Fits<unsigned, OpcodeSize::Wide32>::convert(stream[2]))
, m_value(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[3]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static WasmI64AtomicRmw8XchgU decode(const uint8_t* stream)
{
        // The constructors take a pointer to the first operand, so skip the wide prefix (if any) and the opcode byte.
if (*stream == wasm_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == wasm_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide16>(value, func);
else
setDst<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setPointer(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setPointer<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setPointer<OpcodeSize::Wide16>(value, func);
else
setPointer<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setPointer(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setOffset(unsigned value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setOffset<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setOffset<OpcodeSize::Wide16>(value, func);
else
setOffset<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setOffset(unsigned value, Functor func)
{
if (!Fits<unsigned, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 2 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<unsigned, size>::convert(value);
}
template<typename Functor>
void setValue(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setValue<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setValue<OpcodeSize::Wide16>(value, func);
else
setValue<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setValue(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 3 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
VirtualRegister m_dst;
VirtualRegister m_pointer;
unsigned m_offset;
VirtualRegister m_value;
};
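// Accessor for wasm_i64_atomic_rmw16_xchg_u (Wasm i64.atomic.rmw16.xchg_u):
// atomic 16-bit exchange; the old halfword is zero-extended into dst.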
struct WasmI64AtomicRmw16XchgU : public Instruction {
static constexpr WasmOpcodeID opcodeID = wasm_i64_atomic_rmw16_xchg_u;
static constexpr size_t length = 5;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister pointer, unsigned offset, VirtualRegister value)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, dst, pointer, offset, value);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister pointer, unsigned offset, VirtualRegister value)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, dst, pointer, offset, value);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister pointer, unsigned offset, VirtualRegister value)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, dst, pointer, offset, value))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, dst, pointer, offset, value))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, dst, pointer, offset, value);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& dst, VirtualRegister& pointer, unsigned& offset, VirtualRegister& value)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<WasmOpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(dst)
&& Fits<VirtualRegister, __size>::check(pointer)
&& Fits<unsigned, __size>::check(offset)
&& Fits<VirtualRegister, __size>::check(value)
&& (__size == OpcodeSize::Wide16 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister pointer, unsigned offset, VirtualRegister value)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, dst, pointer, offset, value)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide32));
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(dst));
gen->write(Fits<VirtualRegister, __size>::convert(pointer));
gen->write(Fits<unsigned, __size>::convert(offset));
gen->write(Fits<VirtualRegister, __size>::convert(value));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**i64_atomic_rmw16_xchg_u"[2 - __sizeShiftAmount]);
dumper->dumpOperand("dst", m_dst, true);
dumper->dumpOperand("pointer", m_pointer, false);
dumper->dumpOperand("offset", m_offset, false);
dumper->dumpOperand("value", m_value, false);
}
WasmI64AtomicRmw16XchgU(const uint8_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_pointer(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[1]))
, m_offset(Fits<unsigned, OpcodeSize::Narrow>::convert(stream[2]))
, m_value(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[3]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmI64AtomicRmw16XchgU(const uint16_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_pointer(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[1]))
, m_offset(Fits<unsigned, OpcodeSize::Wide16>::convert(stream[2]))
, m_value(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[3]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmI64AtomicRmw16XchgU(const uint32_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_pointer(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[1]))
, m_offset(Fits<unsigned, OpcodeSize::Wide32>::convert(stream[2]))
, m_value(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[3]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static WasmI64AtomicRmw16XchgU decode(const uint8_t* stream)
{
        // The constructors take a pointer to the first operand, so skip the wide prefix (if any) and the opcode byte.
if (*stream == wasm_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == wasm_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide16>(value, func);
else
setDst<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setPointer(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setPointer<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setPointer<OpcodeSize::Wide16>(value, func);
else
setPointer<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setPointer(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setOffset(unsigned value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setOffset<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setOffset<OpcodeSize::Wide16>(value, func);
else
setOffset<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setOffset(unsigned value, Functor func)
{
if (!Fits<unsigned, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 2 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<unsigned, size>::convert(value);
}
template<typename Functor>
void setValue(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setValue<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setValue<OpcodeSize::Wide16>(value, func);
else
setValue<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setValue(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 3 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
VirtualRegister m_dst;
VirtualRegister m_pointer;
unsigned m_offset;
VirtualRegister m_value;
};
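// Accessor for wasm_i64_atomic_rmw32_xchg_u (Wasm i64.atomic.rmw32.xchg_u):
// atomic 32-bit exchange; the old word is zero-extended into dst.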
struct WasmI64AtomicRmw32XchgU : public Instruction {
static constexpr WasmOpcodeID opcodeID = wasm_i64_atomic_rmw32_xchg_u;
static constexpr size_t length = 5;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister pointer, unsigned offset, VirtualRegister value)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, dst, pointer, offset, value);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister pointer, unsigned offset, VirtualRegister value)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, dst, pointer, offset, value);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister pointer, unsigned offset, VirtualRegister value)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, dst, pointer, offset, value))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, dst, pointer, offset, value))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, dst, pointer, offset, value);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& dst, VirtualRegister& pointer, unsigned& offset, VirtualRegister& value)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<WasmOpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(dst)
&& Fits<VirtualRegister, __size>::check(pointer)
&& Fits<unsigned, __size>::check(offset)
&& Fits<VirtualRegister, __size>::check(value)
&& (__size == OpcodeSize::Wide16 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister pointer, unsigned offset, VirtualRegister value)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, dst, pointer, offset, value)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide32));
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(dst));
gen->write(Fits<VirtualRegister, __size>::convert(pointer));
gen->write(Fits<unsigned, __size>::convert(offset));
gen->write(Fits<VirtualRegister, __size>::convert(value));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**i64_atomic_rmw32_xchg_u"[2 - __sizeShiftAmount]);
dumper->dumpOperand("dst", m_dst, true);
dumper->dumpOperand("pointer", m_pointer, false);
dumper->dumpOperand("offset", m_offset, false);
dumper->dumpOperand("value", m_value, false);
}
WasmI64AtomicRmw32XchgU(const uint8_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_pointer(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[1]))
, m_offset(Fits<unsigned, OpcodeSize::Narrow>::convert(stream[2]))
, m_value(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[3]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmI64AtomicRmw32XchgU(const uint16_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_pointer(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[1]))
, m_offset(Fits<unsigned, OpcodeSize::Wide16>::convert(stream[2]))
, m_value(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[3]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmI64AtomicRmw32XchgU(const uint32_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_pointer(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[1]))
, m_offset(Fits<unsigned, OpcodeSize::Wide32>::convert(stream[2]))
, m_value(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[3]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static WasmI64AtomicRmw32XchgU decode(const uint8_t* stream)
{
        // The constructors take a pointer to the first operand, so skip the wide prefix (if any) and the opcode byte.
if (*stream == wasm_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == wasm_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide16>(value, func);
else
setDst<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setPointer(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setPointer<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setPointer<OpcodeSize::Wide16>(value, func);
else
setPointer<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setPointer(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setOffset(unsigned value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setOffset<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setOffset<OpcodeSize::Wide16>(value, func);
else
setOffset<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setOffset(unsigned value, Functor func)
{
if (!Fits<unsigned, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 2 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<unsigned, size>::convert(value);
}
template<typename Functor>
void setValue(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setValue<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setValue<OpcodeSize::Wide16>(value, func);
else
setValue<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setValue(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 3 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
VirtualRegister m_dst;
VirtualRegister m_pointer;
unsigned m_offset;
VirtualRegister m_value;
};
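// Accessor for wasm_i64_atomic_rmw_cmpxchg (Wasm i64.atomic.rmw.cmpxchg):
// 64-bit compare-and-exchange at pointer + offset; value is stored only when
// the current contents equal expected, and dst receives the old contents.
// The extra expected operand makes this a length-6 instruction.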
struct WasmI64AtomicRmwCmpxchg : public Instruction {
static constexpr WasmOpcodeID opcodeID = wasm_i64_atomic_rmw_cmpxchg;
static constexpr size_t length = 6;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister pointer, unsigned offset, VirtualRegister expected, VirtualRegister value)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, dst, pointer, offset, expected, value);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister pointer, unsigned offset, VirtualRegister expected, VirtualRegister value)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, dst, pointer, offset, expected, value);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister pointer, unsigned offset, VirtualRegister expected, VirtualRegister value)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, dst, pointer, offset, expected, value))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, dst, pointer, offset, expected, value))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, dst, pointer, offset, expected, value);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& dst, VirtualRegister& pointer, unsigned& offset, VirtualRegister& expected, VirtualRegister& value)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<WasmOpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(dst)
&& Fits<VirtualRegister, __size>::check(pointer)
&& Fits<unsigned, __size>::check(offset)
&& Fits<VirtualRegister, __size>::check(expected)
&& Fits<VirtualRegister, __size>::check(value)
&& (__size == OpcodeSize::Wide16 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister pointer, unsigned offset, VirtualRegister expected, VirtualRegister value)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, dst, pointer, offset, expected, value)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide32));
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(dst));
gen->write(Fits<VirtualRegister, __size>::convert(pointer));
gen->write(Fits<unsigned, __size>::convert(offset));
gen->write(Fits<VirtualRegister, __size>::convert(expected));
gen->write(Fits<VirtualRegister, __size>::convert(value));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**i64_atomic_rmw_cmpxchg"[2 - __sizeShiftAmount]);
dumper->dumpOperand("dst", m_dst, true);
dumper->dumpOperand("pointer", m_pointer, false);
dumper->dumpOperand("offset", m_offset, false);
dumper->dumpOperand("expected", m_expected, false);
dumper->dumpOperand("value", m_value, false);
}
WasmI64AtomicRmwCmpxchg(const uint8_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_pointer(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[1]))
, m_offset(Fits<unsigned, OpcodeSize::Narrow>::convert(stream[2]))
, m_expected(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[3]))
, m_value(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[4]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmI64AtomicRmwCmpxchg(const uint16_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_pointer(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[1]))
, m_offset(Fits<unsigned, OpcodeSize::Wide16>::convert(stream[2]))
, m_expected(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[3]))
, m_value(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[4]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmI64AtomicRmwCmpxchg(const uint32_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_pointer(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[1]))
, m_offset(Fits<unsigned, OpcodeSize::Wide32>::convert(stream[2]))
, m_expected(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[3]))
, m_value(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[4]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static WasmI64AtomicRmwCmpxchg decode(const uint8_t* stream)
{
        // The constructors take a pointer to the first operand, so skip the wide prefix (if any) and the opcode byte.
if (*stream == wasm_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == wasm_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide16>(value, func);
else
setDst<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setPointer(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setPointer<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setPointer<OpcodeSize::Wide16>(value, func);
else
setPointer<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setPointer(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setOffset(unsigned value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setOffset<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setOffset<OpcodeSize::Wide16>(value, func);
else
setOffset<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setOffset(unsigned value, Functor func)
{
if (!Fits<unsigned, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 2 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<unsigned, size>::convert(value);
}
template<typename Functor>
void setExpected(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setExpected<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setExpected<OpcodeSize::Wide16>(value, func);
else
setExpected<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setExpected(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 3 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setValue(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setValue<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setValue<OpcodeSize::Wide16>(value, func);
else
setValue<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setValue(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 4 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
VirtualRegister m_dst;
VirtualRegister m_pointer;
unsigned m_offset;
VirtualRegister m_expected;
VirtualRegister m_value;
};
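// Accessor for wasm_i64_atomic_rmw8_cmpxchg_u (Wasm i64.atomic.rmw8.cmpxchg_u):
// 8-bit compare-and-exchange; the old byte is zero-extended into dst.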
struct WasmI64AtomicRmw8CmpxchgU : public Instruction {
static constexpr WasmOpcodeID opcodeID = wasm_i64_atomic_rmw8_cmpxchg_u;
static constexpr size_t length = 6;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister pointer, unsigned offset, VirtualRegister expected, VirtualRegister value)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, dst, pointer, offset, expected, value);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister pointer, unsigned offset, VirtualRegister expected, VirtualRegister value)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, dst, pointer, offset, expected, value);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister pointer, unsigned offset, VirtualRegister expected, VirtualRegister value)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, dst, pointer, offset, expected, value))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, dst, pointer, offset, expected, value))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, dst, pointer, offset, expected, value);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& dst, VirtualRegister& pointer, unsigned& offset, VirtualRegister& expected, VirtualRegister& value)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<WasmOpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(dst)
&& Fits<VirtualRegister, __size>::check(pointer)
&& Fits<unsigned, __size>::check(offset)
&& Fits<VirtualRegister, __size>::check(expected)
&& Fits<VirtualRegister, __size>::check(value)
&& (__size == OpcodeSize::Wide16 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister pointer, unsigned offset, VirtualRegister expected, VirtualRegister value)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, dst, pointer, offset, expected, value)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide32));
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(dst));
gen->write(Fits<VirtualRegister, __size>::convert(pointer));
gen->write(Fits<unsigned, __size>::convert(offset));
gen->write(Fits<VirtualRegister, __size>::convert(expected));
gen->write(Fits<VirtualRegister, __size>::convert(value));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**i64_atomic_rmw8_cmpxchg_u"[2 - __sizeShiftAmount]);
dumper->dumpOperand("dst", m_dst, true);
dumper->dumpOperand("pointer", m_pointer, false);
dumper->dumpOperand("offset", m_offset, false);
dumper->dumpOperand("expected", m_expected, false);
dumper->dumpOperand("value", m_value, false);
}
WasmI64AtomicRmw8CmpxchgU(const uint8_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_pointer(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[1]))
, m_offset(Fits<unsigned, OpcodeSize::Narrow>::convert(stream[2]))
, m_expected(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[3]))
, m_value(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[4]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmI64AtomicRmw8CmpxchgU(const uint16_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_pointer(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[1]))
, m_offset(Fits<unsigned, OpcodeSize::Wide16>::convert(stream[2]))
, m_expected(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[3]))
, m_value(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[4]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmI64AtomicRmw8CmpxchgU(const uint32_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_pointer(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[1]))
, m_offset(Fits<unsigned, OpcodeSize::Wide32>::convert(stream[2]))
, m_expected(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[3]))
, m_value(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[4]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static WasmI64AtomicRmw8CmpxchgU decode(const uint8_t* stream)
{
        // The constructors take a pointer to the first operand, so skip the wide prefix (if any) and the opcode byte.
if (*stream == wasm_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == wasm_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide16>(value, func);
else
setDst<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setPointer(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setPointer<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setPointer<OpcodeSize::Wide16>(value, func);
else
setPointer<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setPointer(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setOffset(unsigned value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setOffset<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setOffset<OpcodeSize::Wide16>(value, func);
else
setOffset<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setOffset(unsigned value, Functor func)
{
if (!Fits<unsigned, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 2 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<unsigned, size>::convert(value);
}
template<typename Functor>
void setExpected(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setExpected<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setExpected<OpcodeSize::Wide16>(value, func);
else
setExpected<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setExpected(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 3 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setValue(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setValue<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setValue<OpcodeSize::Wide16>(value, func);
else
setValue<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setValue(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 4 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
VirtualRegister m_dst;
VirtualRegister m_pointer;
unsigned m_offset;
VirtualRegister m_expected;
VirtualRegister m_value;
};
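// Note on the generated structs below: each Wasm instruction struct encodes
// its opcode plus operands in one of three sizes (Narrow = 1 byte per slot,
// Wide16 = 2 bytes, Wide32 = 4 bytes). Wide encodings are introduced by a
// one-byte wasm_wide16/wasm_wide32 prefix, and `length` counts the opcode
// slot plus one slot per operand.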
struct WasmI64AtomicRmw16CmpxchgU : public Instruction {
static constexpr WasmOpcodeID opcodeID = wasm_i64_atomic_rmw16_cmpxchg_u;
static constexpr size_t length = 6;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister pointer, unsigned offset, VirtualRegister expected, VirtualRegister value)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, dst, pointer, offset, expected, value);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister pointer, unsigned offset, VirtualRegister expected, VirtualRegister value)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, dst, pointer, offset, expected, value);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
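    // Starting from the requested minimum size, try Narrow, then Wide16, and
    // finally Wide32 (which must succeed, hence the Assert) until the
    // operands fit.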
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister pointer, unsigned offset, VirtualRegister expected, VirtualRegister value)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, dst, pointer, offset, expected, value))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, dst, pointer, offset, expected, value))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, dst, pointer, offset, expected, value);
}
private:
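    // Returns true when the opcode and every operand fit in the candidate
    // encoding size (and, for wide encodings, the prefix itself fits a
    // narrow slot).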
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& dst, VirtualRegister& pointer, unsigned& offset, VirtualRegister& expected, VirtualRegister& value)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<WasmOpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(dst)
&& Fits<VirtualRegister, __size>::check(pointer)
&& Fits<unsigned, __size>::check(offset)
&& Fits<VirtualRegister, __size>::check(expected)
&& Fits<VirtualRegister, __size>::check(value)
&& (__size == OpcodeSize::Wide16 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide32) : true);
}
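    // Aligns the stream for wide encodings, then, if checkImpl accepts the
    // size, writes the optional wide prefix, the opcode, and the operands in
    // order; returns false without writing operands otherwise.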
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister pointer, unsigned offset, VirtualRegister expected, VirtualRegister value)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, dst, pointer, offset, expected, value)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide32));
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(dst));
gen->write(Fits<VirtualRegister, __size>::convert(pointer));
gen->write(Fits<unsigned, __size>::convert(offset));
gen->write(Fits<VirtualRegister, __size>::convert(expected));
gen->write(Fits<VirtualRegister, __size>::convert(value));
return true;
}
return false;
}
public:
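    // The &"**name"[2 - __sizeShiftAmount] indexing prepends one '*' per
    // doubling of the operand size, so the dumper prints "name", "*name", or
    // "**name" for Narrow, Wide16, and Wide32 (assuming shift amounts of
    // 0, 1, and 2 respectively).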
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**i64_atomic_rmw16_cmpxchg_u"[2 - __sizeShiftAmount]);
dumper->dumpOperand("dst", m_dst, true);
dumper->dumpOperand("pointer", m_pointer, false);
dumper->dumpOperand("offset", m_offset, false);
dumper->dumpOperand("expected", m_expected, false);
dumper->dumpOperand("value", m_value, false);
}
WasmI64AtomicRmw16CmpxchgU(const uint8_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_pointer(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[1]))
, m_offset(Fits<unsigned, OpcodeSize::Narrow>::convert(stream[2]))
, m_expected(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[3]))
, m_value(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[4]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmI64AtomicRmw16CmpxchgU(const uint16_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_pointer(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[1]))
, m_offset(Fits<unsigned, OpcodeSize::Wide16>::convert(stream[2]))
, m_expected(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[3]))
, m_value(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[4]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmI64AtomicRmw16CmpxchgU(const uint32_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_pointer(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[1]))
, m_offset(Fits<unsigned, OpcodeSize::Wide32>::convert(stream[2]))
, m_expected(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[3]))
, m_value(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[4]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static WasmI64AtomicRmw16CmpxchgU decode(const uint8_t* stream)
{
        // stream points at the instruction's first byte (the wide prefix when
        // present, otherwise the opcode); the constructors above receive a
        // pointer to the first operand, past the opcode and any prefix.
if (*stream == wasm_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == wasm_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide16>(value, func);
else
setDst<OpcodeSize::Narrow>(value, func);
}
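    // Operand i of a size-`size` encoding lives at byte offset
    // i * size + PaddingBySize<size>::value (the wide prefix, if any) + 1
    // (the opcode byte) from the start of the instruction. If the new value
    // does not fit, `func` supplies a replacement that does.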
template <OpcodeSize size, typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setPointer(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setPointer<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setPointer<OpcodeSize::Wide16>(value, func);
else
setPointer<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setPointer(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setOffset(unsigned value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setOffset<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setOffset<OpcodeSize::Wide16>(value, func);
else
setOffset<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setOffset(unsigned value, Functor func)
{
if (!Fits<unsigned, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 2 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<unsigned, size>::convert(value);
}
template<typename Functor>
void setExpected(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setExpected<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setExpected<OpcodeSize::Wide16>(value, func);
else
setExpected<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setExpected(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 3 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setValue(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setValue<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setValue<OpcodeSize::Wide16>(value, func);
else
setValue<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setValue(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 4 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
VirtualRegister m_dst;
VirtualRegister m_pointer;
unsigned m_offset;
VirtualRegister m_expected;
VirtualRegister m_value;
};
struct WasmI64AtomicRmw32CmpxchgU : public Instruction {
static constexpr WasmOpcodeID opcodeID = wasm_i64_atomic_rmw32_cmpxchg_u;
static constexpr size_t length = 6;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister pointer, unsigned offset, VirtualRegister expected, VirtualRegister value)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, dst, pointer, offset, expected, value);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister pointer, unsigned offset, VirtualRegister expected, VirtualRegister value)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, dst, pointer, offset, expected, value);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister pointer, unsigned offset, VirtualRegister expected, VirtualRegister value)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, dst, pointer, offset, expected, value))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, dst, pointer, offset, expected, value))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, dst, pointer, offset, expected, value);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& dst, VirtualRegister& pointer, unsigned& offset, VirtualRegister& expected, VirtualRegister& value)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<WasmOpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(dst)
&& Fits<VirtualRegister, __size>::check(pointer)
&& Fits<unsigned, __size>::check(offset)
&& Fits<VirtualRegister, __size>::check(expected)
&& Fits<VirtualRegister, __size>::check(value)
&& (__size == OpcodeSize::Wide16 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister pointer, unsigned offset, VirtualRegister expected, VirtualRegister value)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, dst, pointer, offset, expected, value)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide32));
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(dst));
gen->write(Fits<VirtualRegister, __size>::convert(pointer));
gen->write(Fits<unsigned, __size>::convert(offset));
gen->write(Fits<VirtualRegister, __size>::convert(expected));
gen->write(Fits<VirtualRegister, __size>::convert(value));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**i64_atomic_rmw32_cmpxchg_u"[2 - __sizeShiftAmount]);
dumper->dumpOperand("dst", m_dst, true);
dumper->dumpOperand("pointer", m_pointer, false);
dumper->dumpOperand("offset", m_offset, false);
dumper->dumpOperand("expected", m_expected, false);
dumper->dumpOperand("value", m_value, false);
}
WasmI64AtomicRmw32CmpxchgU(const uint8_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_pointer(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[1]))
, m_offset(Fits<unsigned, OpcodeSize::Narrow>::convert(stream[2]))
, m_expected(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[3]))
, m_value(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[4]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmI64AtomicRmw32CmpxchgU(const uint16_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_pointer(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[1]))
, m_offset(Fits<unsigned, OpcodeSize::Wide16>::convert(stream[2]))
, m_expected(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[3]))
, m_value(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[4]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmI64AtomicRmw32CmpxchgU(const uint32_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_pointer(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[1]))
, m_offset(Fits<unsigned, OpcodeSize::Wide32>::convert(stream[2]))
, m_expected(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[3]))
, m_value(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[4]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static WasmI64AtomicRmw32CmpxchgU decode(const uint8_t* stream)
{
        // stream points at the instruction's first byte (the wide prefix when
        // present, otherwise the opcode); the constructors above receive a
        // pointer to the first operand, past the opcode and any prefix.
if (*stream == wasm_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == wasm_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide16>(value, func);
else
setDst<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setPointer(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setPointer<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setPointer<OpcodeSize::Wide16>(value, func);
else
setPointer<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setPointer(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setOffset(unsigned value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setOffset<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setOffset<OpcodeSize::Wide16>(value, func);
else
setOffset<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setOffset(unsigned value, Functor func)
{
if (!Fits<unsigned, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 2 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<unsigned, size>::convert(value);
}
template<typename Functor>
void setExpected(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setExpected<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setExpected<OpcodeSize::Wide16>(value, func);
else
setExpected<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setExpected(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 3 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setValue(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setValue<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setValue<OpcodeSize::Wide16>(value, func);
else
setValue<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setValue(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 4 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
VirtualRegister m_dst;
VirtualRegister m_pointer;
unsigned m_offset;
VirtualRegister m_expected;
VirtualRegister m_value;
};
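// The memory.atomic.wait32/wait64/notify instructions below follow the same
// generated shape; only the operand lists differ (value/timeout for the
// waits, count for notify).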
struct WasmMemoryAtomicWait32 : public Instruction {
static constexpr WasmOpcodeID opcodeID = wasm_memory_atomic_wait32;
static constexpr size_t length = 6;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister pointer, unsigned offset, VirtualRegister value, VirtualRegister timeout)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, dst, pointer, offset, value, timeout);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister pointer, unsigned offset, VirtualRegister value, VirtualRegister timeout)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, dst, pointer, offset, value, timeout);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister pointer, unsigned offset, VirtualRegister value, VirtualRegister timeout)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, dst, pointer, offset, value, timeout))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, dst, pointer, offset, value, timeout))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, dst, pointer, offset, value, timeout);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& dst, VirtualRegister& pointer, unsigned& offset, VirtualRegister& value, VirtualRegister& timeout)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<WasmOpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(dst)
&& Fits<VirtualRegister, __size>::check(pointer)
&& Fits<unsigned, __size>::check(offset)
&& Fits<VirtualRegister, __size>::check(value)
&& Fits<VirtualRegister, __size>::check(timeout)
&& (__size == OpcodeSize::Wide16 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister pointer, unsigned offset, VirtualRegister value, VirtualRegister timeout)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, dst, pointer, offset, value, timeout)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide32));
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(dst));
gen->write(Fits<VirtualRegister, __size>::convert(pointer));
gen->write(Fits<unsigned, __size>::convert(offset));
gen->write(Fits<VirtualRegister, __size>::convert(value));
gen->write(Fits<VirtualRegister, __size>::convert(timeout));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**memory_atomic_wait32"[2 - __sizeShiftAmount]);
dumper->dumpOperand("dst", m_dst, true);
dumper->dumpOperand("pointer", m_pointer, false);
dumper->dumpOperand("offset", m_offset, false);
dumper->dumpOperand("value", m_value, false);
dumper->dumpOperand("timeout", m_timeout, false);
}
WasmMemoryAtomicWait32(const uint8_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_pointer(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[1]))
, m_offset(Fits<unsigned, OpcodeSize::Narrow>::convert(stream[2]))
, m_value(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[3]))
, m_timeout(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[4]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmMemoryAtomicWait32(const uint16_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_pointer(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[1]))
, m_offset(Fits<unsigned, OpcodeSize::Wide16>::convert(stream[2]))
, m_value(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[3]))
, m_timeout(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[4]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmMemoryAtomicWait32(const uint32_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_pointer(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[1]))
, m_offset(Fits<unsigned, OpcodeSize::Wide32>::convert(stream[2]))
, m_value(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[3]))
, m_timeout(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[4]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static WasmMemoryAtomicWait32 decode(const uint8_t* stream)
{
        // stream points at the instruction's first byte (the wide prefix when
        // present, otherwise the opcode); the constructors above receive a
        // pointer to the first operand, past the opcode and any prefix.
if (*stream == wasm_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == wasm_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide16>(value, func);
else
setDst<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setPointer(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setPointer<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setPointer<OpcodeSize::Wide16>(value, func);
else
setPointer<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setPointer(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setOffset(unsigned value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setOffset<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setOffset<OpcodeSize::Wide16>(value, func);
else
setOffset<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setOffset(unsigned value, Functor func)
{
if (!Fits<unsigned, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 2 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<unsigned, size>::convert(value);
}
template<typename Functor>
void setValue(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setValue<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setValue<OpcodeSize::Wide16>(value, func);
else
setValue<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setValue(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 3 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setTimeout(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setTimeout<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setTimeout<OpcodeSize::Wide16>(value, func);
else
setTimeout<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setTimeout(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 4 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
VirtualRegister m_dst;
VirtualRegister m_pointer;
unsigned m_offset;
VirtualRegister m_value;
VirtualRegister m_timeout;
};
struct WasmMemoryAtomicWait64 : public Instruction {
static constexpr WasmOpcodeID opcodeID = wasm_memory_atomic_wait64;
static constexpr size_t length = 6;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister pointer, unsigned offset, VirtualRegister value, VirtualRegister timeout)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, dst, pointer, offset, value, timeout);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister pointer, unsigned offset, VirtualRegister value, VirtualRegister timeout)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, dst, pointer, offset, value, timeout);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister pointer, unsigned offset, VirtualRegister value, VirtualRegister timeout)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, dst, pointer, offset, value, timeout))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, dst, pointer, offset, value, timeout))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, dst, pointer, offset, value, timeout);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& dst, VirtualRegister& pointer, unsigned& offset, VirtualRegister& value, VirtualRegister& timeout)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<WasmOpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(dst)
&& Fits<VirtualRegister, __size>::check(pointer)
&& Fits<unsigned, __size>::check(offset)
&& Fits<VirtualRegister, __size>::check(value)
&& Fits<VirtualRegister, __size>::check(timeout)
&& (__size == OpcodeSize::Wide16 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister pointer, unsigned offset, VirtualRegister value, VirtualRegister timeout)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, dst, pointer, offset, value, timeout)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide32));
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(dst));
gen->write(Fits<VirtualRegister, __size>::convert(pointer));
gen->write(Fits<unsigned, __size>::convert(offset));
gen->write(Fits<VirtualRegister, __size>::convert(value));
gen->write(Fits<VirtualRegister, __size>::convert(timeout));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**memory_atomic_wait64"[2 - __sizeShiftAmount]);
dumper->dumpOperand("dst", m_dst, true);
dumper->dumpOperand("pointer", m_pointer, false);
dumper->dumpOperand("offset", m_offset, false);
dumper->dumpOperand("value", m_value, false);
dumper->dumpOperand("timeout", m_timeout, false);
}
WasmMemoryAtomicWait64(const uint8_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_pointer(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[1]))
, m_offset(Fits<unsigned, OpcodeSize::Narrow>::convert(stream[2]))
, m_value(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[3]))
, m_timeout(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[4]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmMemoryAtomicWait64(const uint16_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_pointer(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[1]))
, m_offset(Fits<unsigned, OpcodeSize::Wide16>::convert(stream[2]))
, m_value(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[3]))
, m_timeout(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[4]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmMemoryAtomicWait64(const uint32_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_pointer(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[1]))
, m_offset(Fits<unsigned, OpcodeSize::Wide32>::convert(stream[2]))
, m_value(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[3]))
, m_timeout(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[4]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static WasmMemoryAtomicWait64 decode(const uint8_t* stream)
{
        // stream points at the instruction's first byte (the wide prefix when
        // present, otherwise the opcode); the constructors above receive a
        // pointer to the first operand, past the opcode and any prefix.
if (*stream == wasm_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == wasm_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide16>(value, func);
else
setDst<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setPointer(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setPointer<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setPointer<OpcodeSize::Wide16>(value, func);
else
setPointer<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setPointer(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setOffset(unsigned value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setOffset<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setOffset<OpcodeSize::Wide16>(value, func);
else
setOffset<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setOffset(unsigned value, Functor func)
{
if (!Fits<unsigned, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 2 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<unsigned, size>::convert(value);
}
template<typename Functor>
void setValue(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setValue<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setValue<OpcodeSize::Wide16>(value, func);
else
setValue<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setValue(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 3 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setTimeout(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setTimeout<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setTimeout<OpcodeSize::Wide16>(value, func);
else
setTimeout<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setTimeout(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 4 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
VirtualRegister m_dst;
VirtualRegister m_pointer;
unsigned m_offset;
VirtualRegister m_value;
VirtualRegister m_timeout;
};
struct WasmMemoryAtomicNotify : public Instruction {
static constexpr WasmOpcodeID opcodeID = wasm_memory_atomic_notify;
static constexpr size_t length = 5;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister pointer, unsigned offset, VirtualRegister count)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, dst, pointer, offset, count);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister pointer, unsigned offset, VirtualRegister count)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, dst, pointer, offset, count);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister pointer, unsigned offset, VirtualRegister count)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, dst, pointer, offset, count))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, dst, pointer, offset, count))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, dst, pointer, offset, count);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& dst, VirtualRegister& pointer, unsigned& offset, VirtualRegister& count)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<WasmOpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(dst)
&& Fits<VirtualRegister, __size>::check(pointer)
&& Fits<unsigned, __size>::check(offset)
&& Fits<VirtualRegister, __size>::check(count)
&& (__size == OpcodeSize::Wide16 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister pointer, unsigned offset, VirtualRegister count)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, dst, pointer, offset, count)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide32));
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(dst));
gen->write(Fits<VirtualRegister, __size>::convert(pointer));
gen->write(Fits<unsigned, __size>::convert(offset));
gen->write(Fits<VirtualRegister, __size>::convert(count));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**memory_atomic_notify"[2 - __sizeShiftAmount]);
dumper->dumpOperand("dst", m_dst, true);
dumper->dumpOperand("pointer", m_pointer, false);
dumper->dumpOperand("offset", m_offset, false);
dumper->dumpOperand("count", m_count, false);
}
WasmMemoryAtomicNotify(const uint8_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_pointer(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[1]))
, m_offset(Fits<unsigned, OpcodeSize::Narrow>::convert(stream[2]))
, m_count(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[3]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmMemoryAtomicNotify(const uint16_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_pointer(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[1]))
, m_offset(Fits<unsigned, OpcodeSize::Wide16>::convert(stream[2]))
, m_count(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[3]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmMemoryAtomicNotify(const uint32_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_pointer(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[1]))
, m_offset(Fits<unsigned, OpcodeSize::Wide32>::convert(stream[2]))
, m_count(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[3]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static WasmMemoryAtomicNotify decode(const uint8_t* stream)
{
        // stream points at the instruction's first byte (the wide prefix when
        // present, otherwise the opcode); the constructors above receive a
        // pointer to the first operand, past the opcode and any prefix.
if (*stream == wasm_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == wasm_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide16>(value, func);
else
setDst<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setPointer(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setPointer<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setPointer<OpcodeSize::Wide16>(value, func);
else
setPointer<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setPointer(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setOffset(unsigned value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setOffset<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setOffset<OpcodeSize::Wide16>(value, func);
else
setOffset<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setOffset(unsigned value, Functor func)
{
if (!Fits<unsigned, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 2 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<unsigned, size>::convert(value);
}
template<typename Functor>
void setCount(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setCount<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setCount<OpcodeSize::Wide16>(value, func);
else
setCount<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setCount(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 3 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
VirtualRegister m_dst;
VirtualRegister m_pointer;
unsigned m_offset;
VirtualRegister m_count;
};
struct WasmI32Add : public Instruction {
static constexpr WasmOpcodeID opcodeID = wasm_i32_add;
static constexpr size_t length = 4;
template<typename BytecodeGenerator>
static void emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
emitWithSmallestSizeRequirement<OpcodeSize::Narrow, BytecodeGenerator>(gen, dst, lhs, rhs);
}
template<OpcodeSize __size, typename BytecodeGenerator, FitsAssertion shouldAssert = Assert, bool recordOpcode = true>
static bool emit(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
bool didEmit = emitImpl<__size, recordOpcode, BytecodeGenerator>(gen, dst, lhs, rhs);
if (shouldAssert == Assert)
ASSERT(didEmit);
return didEmit;
}
template<OpcodeSize __size, typename BytecodeGenerator>
static void emitWithSmallestSizeRequirement(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Narrow)) {
if (emit<OpcodeSize::Narrow, BytecodeGenerator, NoAssert, true>(gen, dst, lhs, rhs))
return;
}
if (static_cast<unsigned>(__size) <= static_cast<unsigned>(OpcodeSize::Wide16)) {
if (emit<OpcodeSize::Wide16, BytecodeGenerator, NoAssert, true>(gen, dst, lhs, rhs))
return;
}
emit<OpcodeSize::Wide32, BytecodeGenerator, Assert, true>(gen, dst, lhs, rhs);
}
private:
template<OpcodeSize __size, typename BytecodeGenerator>
static bool checkImpl(BytecodeGenerator* gen, VirtualRegister& dst, VirtualRegister& lhs, VirtualRegister& rhs)
{
UNUSED_PARAM(gen);
#if OS(WINDOWS) && ENABLE(C_LOOP)
// FIXME: Disable wide16 optimization for Windows CLoop
// https://bugs.webkit.org/show_bug.cgi?id=198283
if (__size == OpcodeSize::Wide16)
return false;
#endif
return Fits<WasmOpcodeID, __size>::check(opcodeID)
&& Fits<VirtualRegister, __size>::check(dst)
&& Fits<VirtualRegister, __size>::check(lhs)
&& Fits<VirtualRegister, __size>::check(rhs)
&& (__size == OpcodeSize::Wide16 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide16) : true)
&& (__size == OpcodeSize::Wide32 ? Fits<WasmOpcodeID, OpcodeSize::Narrow>::check(wasm_wide32) : true);
}
template<OpcodeSize __size, bool recordOpcode, typename BytecodeGenerator>
static bool emitImpl(BytecodeGenerator* gen, VirtualRegister dst, VirtualRegister lhs, VirtualRegister rhs)
{
if (__size == OpcodeSize::Wide16)
gen->alignWideOpcode16();
else if (__size == OpcodeSize::Wide32)
gen->alignWideOpcode32();
if (checkImpl<__size>(gen, dst, lhs, rhs)) {
if (recordOpcode)
gen->recordOpcode(opcodeID);
if (__size == OpcodeSize::Wide16)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide16));
else if (__size == OpcodeSize::Wide32)
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(wasm_wide32));
gen->write(Fits<WasmOpcodeID, OpcodeSize::Narrow>::convert(opcodeID));
gen->write(Fits<VirtualRegister, __size>::convert(dst));
gen->write(Fits<VirtualRegister, __size>::convert(lhs));
gen->write(Fits<VirtualRegister, __size>::convert(rhs));
return true;
}
return false;
}
public:
void dump(BytecodeDumperBase* dumper, InstructionStream::Offset __location, int __sizeShiftAmount)
{
dumper->printLocationAndOp(__location, &"**i32_add"[2 - __sizeShiftAmount]);
dumper->dumpOperand("dst", m_dst, true);
dumper->dumpOperand("lhs", m_lhs, false);
dumper->dumpOperand("rhs", m_rhs, false);
}
WasmI32Add(const uint8_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[0]))
, m_lhs(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[1]))
, m_rhs(Fits<VirtualRegister, OpcodeSize::Narrow>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmI32Add(const uint16_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[0]))
, m_lhs(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[1]))
, m_rhs(Fits<VirtualRegister, OpcodeSize::Wide16>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
WasmI32Add(const uint32_t* stream)
: m_dst(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[0]))
, m_lhs(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[1]))
, m_rhs(Fits<VirtualRegister, OpcodeSize::Wide32>::convert(stream[2]))
{
ASSERT_UNUSED(stream, bitwise_cast<const uint8_t*>(stream)[-1] == opcodeID);
}
static WasmI32Add decode(const uint8_t* stream)
{
        // stream points at the instruction's first byte (the wide prefix when
        // present, otherwise the opcode); the constructors above receive a
        // pointer to the first operand, past the opcode and any prefix.
if (*stream == wasm_wide32)
return { bitwise_cast<const uint32_t*>(stream + 2) };
if (*stream == wasm_wide16)
return { bitwise_cast<const uint16_t*>(stream + 2) };
return { stream + 1 };
}
template<typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setDst<OpcodeSize::Wide16>(value, func);
else
setDst<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setDst(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 0 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setLhs(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setLhs<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setLhs<OpcodeSize::Wide16>(value, func);
else
setLhs<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setLhs(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 1 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
template<typename Functor>
void setRhs(VirtualRegister value, Functor func)
{
if (isWide32<WasmOpcodeTraits>())
setRhs<OpcodeSize::Wide32>(value, func);
else if (isWide16<WasmOpcodeTraits>())
setRhs<OpcodeSize::Wide16>(value, func);
else
setRhs<OpcodeSize::Narrow>(value, func);
}
template <OpcodeSize size, typename Functor>
void setRhs(VirtualRegister value, Functor func)
{
if (!Fits<VirtualRegister, size>::check(value))
value = func();
auto* stream = bitwise_cast<typename TypeBySize<size>::unsignedType*>(reinterpret_cast<uint8_t*>(this) + 2 * size + PaddingBySize<size>::value + /* Opcode size */ 1);
*stream = Fits<VirtualRegister, size>::convert(value);
}
VirtualRegister m_dst;
VirtualRegister m_lhs;
VirtualRegister m_rhs;
};
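// Illustrative sketch (hypothetical names; `gen` stands for any generator
// exposing the write()/alignWideOpcode16()/alignWideOpcode32()/recordOpcode()
// hooks used above):
//
//     WasmI32Add::emit(&gen, dst, lhs, rhs); // picks the smallest fitting encoding
//
//     // Later, with `pc` pointing at the instruction's first byte (the wide
//     // prefix when present, otherwise the opcode):
//     WasmI32Add add = WasmI32Add::decode(pc);
//     // add.m_dst, add.m_lhs, and add.m_rhs now hold the decoded operands.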
#endif // ENABLE(WEBASSEMBLY)
} // namespace JSC
// SHA1Hash: a76f4e6da941f840c6fa40e2cfc17431f4a02ca8