Fix code style check issues

Signed-off-by: getingke <getingke@huawei.com>
Change-Id: I3ff8ba9fbeaa282d02f4832a4b6a85e054670765
This commit is contained in:
getingke 2022-05-18 20:55:17 +08:00
parent ccd3055ca6
commit b02bd86a9c
11 changed files with 97 additions and 79 deletions

View File

@ -165,12 +165,15 @@ void AssemblerAarch64::Ldp(const VectorRegister &vt, const VectorRegister &vt2,
uint64_t imm = operand.GetImmediate().Value();
switch (vt.GetScale()) {
case S:
// 2 : 2 means remove trailing zeros
imm >>= 2;
break;
case D:
// 3 : 3 means remove trailing zeros
imm >>= 3;
break;
case Q:
// 4 : 4 means remove trailing zeros
imm >>= 4;
break;
default:
@ -205,12 +208,15 @@ void AssemblerAarch64::Stp(const VectorRegister &vt, const VectorRegister &vt2,
uint64_t imm = operand.GetImmediate().Value();
switch (vt.GetScale()) {
case S:
// 2 : 2 means remove trailing zeros
imm >>= 2;
break;
case D:
// 3 : 3 means remove trailing zeros
imm >>= 3;
break;
case Q:
// 4 : 4 means remove trailing zeros
imm >>= 4;
break;
default:
@ -228,7 +234,7 @@ void AssemblerAarch64::Stp(const VectorRegister &vt, const VectorRegister &vt2,
uint32_t AssemblerAarch64::GetOpcFromScale(Scale scale, bool ispair)
{
uint32_t opc = 0;
switch(scale) {
switch (scale) {
case Scale::B:
case Scale::H:
ASSERT(!ispair);
@ -241,6 +247,7 @@ uint32_t AssemblerAarch64::GetOpcFromScale(Scale scale, bool ispair)
opc = ispair ? 1 : 1;
break;
case Scale::Q:
// 3 : 3 means opc bits are 0b11
opc = ispair ? 1 : 3;
break;
default:
@ -614,12 +621,10 @@ void AssemblerAarch64::Movn(const Register &rd, uint64_t imm, int shift)
void AssemblerAarch64::MovWide(uint32_t op, const Register &rd, uint64_t imm, int shift)
{
#define Imm16(x) (((x) << MOV_WIDE_Imm16_LOWBITS) & MOV_WIDE_Imm16_MASK)
#define Hw(x) (((x) << MOV_WIDE_Hw_LOWBITS) & MOV_WIDE_Hw_MASK)
uint32_t code = Sf(!rd.IsW()) | op | Imm16(imm) | Hw((shift/16)) | Rd(rd.GetId());
uint32_t imm_field = (imm << MOV_WIDE_Imm16_LOWBITS) & MOV_WIDE_Imm16_MASK;
uint32_t hw_field = ((shift / 16) << MOV_WIDE_Hw_LOWBITS) & MOV_WIDE_Hw_MASK;
uint32_t code = Sf(!rd.IsW()) | op | imm_field | hw_field | Rd(rd.GetId());
EmitU32(code);
#undef Imm16
#undef Hw
}
@ -653,13 +658,11 @@ void AssemblerAarch64::BitWiseOpImm(BitwiseOpCode op, const Register &rd, const
void AssemblerAarch64::BitWiseOpShift(BitwiseOpCode op, const Register &rd, const Register &rn, const Operand &operand)
{
#define Shift(x) (((x) << BITWISE_OP_Shift_LOWBITS) & BITWISE_OP_Shift_MASK)
#define ShiftAmount(x) (((x) << BITWISE_OP_ShiftAmount_LOWBITS) & BITWISE_OP_ShiftAmount_MASK)
uint32_t code = Sf(!rd.IsW()) | op | Shift(operand.GetShiftOption()) | Rm(operand.Reg().GetId())
| ShiftAmount(operand.GetShiftAmount()) | Rn(rn.GetId()) | Rd(rd.GetId());
uint32_t shift_field = (operand.GetShiftOption() << BITWISE_OP_Shift_LOWBITS) & BITWISE_OP_Shift_MASK;
uint32_t shift_amount = (operand.GetShiftAmount() << BITWISE_OP_ShiftAmount_LOWBITS) & BITWISE_OP_ShiftAmount_MASK;
uint32_t code = Sf(!rd.IsW()) | op | shift_field | Rm(operand.Reg().GetId())
|shift_amount | Rn(rn.GetId()) | Rd(rd.GetId());
EmitU32(code);
#undef ShiftAmount
#undef Shift
}
void AssemblerAarch64::Lsl(const Register &rd, const Register &rn, const Register &rm)
@ -671,7 +674,7 @@ void AssemblerAarch64::Lsl(const Register &rd, const Register &rn, const Registe
void AssemblerAarch64::Lsr(const Register &rd, const Register &rn, const Register &rm)
{
uint32_t code = Sf(!rd.IsW()) | LSR_Reg | Rm(rm.GetId()) | Rn(rn.GetId()) | Rd(rd.GetId());
EmitU32(code);
EmitU32(code);
}
void AssemblerAarch64::Ubfm(const Register &rd, const Register &rn, unsigned immr, unsigned imms)
@ -681,7 +684,7 @@ void AssemblerAarch64::Ubfm(const Register &rd, const Register &rn, unsigned imm
uint32_t immr_field = (immr << BITWISE_OP_Immr_LOWBITS) & BITWISE_OP_Immr_MASK;
uint32_t imms_field = (imms << BITWISE_OP_Imms_LOWBITS) & BITWISE_OP_Imms_MASK;
uint32_t code = Sf(sf) | UBFM | n | immr_field | imms_field | Rn(rn.GetId()) | Rd(rd.GetId());
EmitU32(code);
EmitU32(code);
}
void AssemblerAarch64::Lsr(const Register &rd, const Register &rn, unsigned shift)
@ -1065,7 +1068,6 @@ void AssemblerAarch64::Ret(const Register &rn)
void AssemblerAarch64::Brk(const Immediate &imm)
{
uint32_t brk_number_field =
(static_cast<uint32_t>(imm.Value()) << BRK_Imm16_LOWBITS) & BRK_Imm16_MASK;
uint32_t code = BRKImm | brk_number_field;

View File

@ -84,7 +84,7 @@ public:
}
private:
VectorRegisterId reg_;
Scale scale_;
Scale scale_;
};
class Immediate {

View File

@ -56,7 +56,7 @@ void ExtendedAssembler::CallAssemblerStub(int id, bool isTail)
void ExtendedAssembler::BindAssemblerStub(int id)
{
Label *target = module_->GetFunctionLabel(id);
Bind(target);
Bind(target);
}
void ExtendedAssembler::SaveFpAndLr()

View File

@ -559,6 +559,7 @@ void AssemblerX64::Jmp(Register dst)
EmitRexPrefix(dst);
// opcode FF/4 : jmp r/m64
EmitU8(0xFF);
// 4 means register
EmitModrm(4, dst);
}
@ -836,7 +837,8 @@ void AssemblerX64::Movl(Operand src, Register dst)
EmitOperand(dst, src);
}
void AssemblerX64::Testq(Immediate src, Register dst) {
void AssemblerX64::Testq(Immediate src, Register dst)
{
if (InRange8(src.Value())) {
Testb(src, dst);
} else if (dst == rax) {

View File

@ -26,7 +26,7 @@ void AssemblerModule::Run(const std::string &triple, Chunk* chunk)
SetUpForAsmStubs();
if (triple.compare("x86_64-unknown-linux-gnu") == 0) {
GenerateStubsX64(chunk);
} else if (triple.compare("aarch64-unknown-linux-gnu") == 0){
} else if (triple.compare("aarch64-unknown-linux-gnu") == 0) {
GenerateStubsAarch64(chunk);
} else {
UNREACHABLE();
@ -70,7 +70,7 @@ void AssemblerModule::GenerateStubsAarch64(Chunk* chunk)
void AssemblerModule::SetUpForAsmStubs()
{
RuntimeStubCSigns::GetASMCSigns(asmCallSigns_);
for(auto cs : asmCallSigns_) {
for (auto cs : asmCallSigns_) {
symbolTable_[cs->GetID()] = new Label();
}
}

View File

@ -604,13 +604,13 @@ DEF_CALL_SIGNATURE(JSFunctionEntry)
CallSignature jsCallFunctionEntry("JSFunctionEntry", 0, 6,
ArgumentsOrder::DEFAULT_ORDER, VariableType::JS_ANY());
*callSign = jsCallFunctionEntry;
std::array<VariableType, 6> params = { // 4 : 4 input parameters
std::array<VariableType, 6> params = { // 6 : 6 input parameters
VariableType::NATIVE_POINTER(), // glue
VariableType::NATIVE_POINTER(), // prev fp
VariableType::INT32(), // expectedNumArgs
VariableType::INT32(), // actualNumArgs
VariableType::NATIVE_POINTER(), // argv
VariableType::NATIVE_POINTER(), // codeAddr
VariableType::INT32(), // expectedNumArgs
VariableType::INT32(), // actualNumArgs
VariableType::NATIVE_POINTER(), // argv
VariableType::NATIVE_POINTER(), // codeAddr
};
callSign->SetParameters(params.data());
callSign->SetTargetKind(CallSignature::TargetKind::RUNTIME_STUB_NO_GC);

View File

@ -274,7 +274,7 @@ GateRef CircuitBuilder::CallRuntimeVarargs(GateRef glue, int index, GateRef argc
GateRef target = IntPtr(index);
auto label = GetCurrentLabel();
auto depend = label->GetDepend();
assert(cs->IsRuntimeVAStub());
assert(cs->IsRuntimeVAStub());
GateRef result = Call(cs, glue, target, depend, {argc, argv});
label->SetDepend(result);
return result;

View File

@ -1863,7 +1863,6 @@ LLVMTypeRef LLVMModule::GetFuncType(const CallSignature *stubDescriptor)
std::vector<LLVMTypeRef> paramTys;
auto paramCount = stubDescriptor->GetParametersCount();
int extraParameterCnt = 0;
auto paramsType = stubDescriptor->GetParametersType();
if (paramsType != nullptr) {

View File

@ -78,23 +78,25 @@ void AssemblerStubs::CallRuntime(ExtendedAssembler *assembler)
__ Str(fp, MemoryOperand(glue, JSThread::GlueData::GetLeaveFrameOffset(false)));
// construct Leave Frame and callee save
__ Mov(frameType, Immediate(static_cast<int64_t>(FrameType::LEAVE_FRAME)));
__ Stp(tmp, frameType, MemoryOperand(sp, -16, MemoryOperand::AddrMode::PREINDEX));
__ Mov(frameType, Immediate(static_cast<int64_t>(FrameType::LEAVE_FRAME)));
__ Stp(tmp, frameType, MemoryOperand(sp, -FRAME_SLOT_SIZE * 2, MemoryOperand::AddrMode::PREINDEX));
// load runtime trampoline address
Register rtfunc(X19);
__ Ldr(tmp, MemoryOperand(fp, 16));
__ Ldr(tmp, MemoryOperand(fp, GetStackArgOffSetToFp(0)));
// 3 : 3 means 1 << 3 = 8
__ Add(tmp, glue, Operand(tmp, LSL, 3));
__ Ldr(rtfunc, MemoryOperand(tmp, JSThread::GlueData::GetRTStubEntriesOffset(false)));
__ Ldr(argC, MemoryOperand(fp, 24));
__ Add(argV, fp, Immediate(32));
__ Ldr(argC, MemoryOperand(fp, GetStackArgOffSetToFp(1)));
__ Add(argV, fp, Immediate(GetStackArgOffSetToFp(2)));
__ Blr(rtfunc);
// callee restore
__ Ldr(tmp, MemoryOperand(sp, 0));
// deconstruct frame
__ Add(sp, sp, Immediate(16));
// 2 : 2 means 2 stack frame slots
__ Add(sp, sp, Immediate(2 * FRAME_SLOT_SIZE));
__ RestoreFpAndLr();
__ Ret();
}
@ -132,7 +134,7 @@ void AssemblerStubs::JSFunctionEntry(ExtendedAssembler *assembler)
Register fp(X29);
__ BindAssemblerStub(RTSTUB_ID(JSFunctionEntry));
__ Str(Register(X30), MemoryOperand(sp, -8, MemoryOperand::AddrMode::PREINDEX));
__ Str(Register(X30), MemoryOperand(sp, -FRAME_SLOT_SIZE, MemoryOperand::AddrMode::PREINDEX));
__ CalleeSave();
__ Str(fp, MemoryOperand(sp, -8, MemoryOperand::AddrMode::PREINDEX));
__ Mov(fp, sp);
@ -141,7 +143,7 @@ void AssemblerStubs::JSFunctionEntry(ExtendedAssembler *assembler)
Register frameType(X19);
// construct frame
__ Mov(frameType, Immediate(static_cast<int64_t>(FrameType::OPTIMIZED_ENTRY_FRAME)));
__ Stp(prevFp, frameType, MemoryOperand(sp, -16, MemoryOperand::AddrMode::PREINDEX));
__ Stp(prevFp, frameType, MemoryOperand(sp, -FRAME_SLOT_SIZE * 2, MemoryOperand::AddrMode::PREINDEX));
Label copyUndefined;
Label copyArguments;
@ -158,7 +160,7 @@ void AssemblerStubs::JSFunctionEntry(ExtendedAssembler *assembler)
__ Bind(&copyUndefined);
__ Sub(count, count, Immediate(1));
__ Cmp(count, actualNumArgs.W());
__ Str(undefValue, MemoryOperand(sp, -8, MemoryOperand::AddrMode::PREINDEX));
__ Str(undefValue, MemoryOperand(sp, -FRAME_SLOT_SIZE, MemoryOperand::AddrMode::PREINDEX));
__ B(Condition::HI, &copyUndefined);
Label invokeCompiledJSFunction;
@ -177,32 +179,33 @@ void AssemblerStubs::JSFunctionEntry(ExtendedAssembler *assembler)
__ Add(argVEnd, argV, Operand(argVEnd.W(), UXTW, 3));
__ Bind(&copyArgLoop);
__ Ldr(argValue, MemoryOperand(argVEnd, -8, MemoryOperand::AddrMode::POSTINDEX));
__ Ldr(argValue, MemoryOperand(argVEnd, -FRAME_SLOT_SIZE, MemoryOperand::AddrMode::POSTINDEX));
__ Subs(argC, argC, Immediate(1));
__ Str(argValue, MemoryOperand(sp, -8, MemoryOperand::AddrMode::PREINDEX));
__ Str(argValue, MemoryOperand(sp, -FRAME_SLOT_SIZE, MemoryOperand::AddrMode::PREINDEX));
__ B(Condition::NE, &copyArgLoop);
}
__ Bind(&invokeCompiledJSFunction);
{
__ Str(actualNumArgs, MemoryOperand(sp, -8, MemoryOperand::AddrMode::PREINDEX));
__ Str(actualNumArgs, MemoryOperand(sp, -FRAME_SLOT_SIZE, MemoryOperand::AddrMode::PREINDEX));
__ Blr(codeAddr);
}
// pop argV argC
// 3 : 3 means argC * 8
__ Add(sp, sp, Operand(tmp, UXTW, 3));
__ Add(sp, sp, Immediate(8));
__ Add(sp, sp, Immediate(FRAME_SLOT_SIZE));
// pop prevLeaveFrameFp to restore thread->currentFrame_
__ Ldr(prevFp, MemoryOperand(sp, 8, MemoryOperand::AddrMode::POSTINDEX));
__ Ldr(prevFp, MemoryOperand(sp, FRAME_SLOT_SIZE, MemoryOperand::AddrMode::POSTINDEX));
__ Str(prevFp, MemoryOperand(glue, JSThread::GlueData::GetLeaveFrameOffset(false)));
// pop entry frame type and c-fp
__ Add(sp, sp, Immediate(8));
__ Ldr(fp, MemoryOperand(sp, 8, MemoryOperand::AddrMode::POSTINDEX));
__ Add(sp, sp, Immediate(FRAME_SLOT_SIZE));
__ Ldr(fp, MemoryOperand(sp, FRAME_SLOT_SIZE, MemoryOperand::AddrMode::POSTINDEX));
__ CalleeRestore();
// restore return address
__ Ldr(Register(X30), MemoryOperand(sp, 8, MemoryOperand::AddrMode::POSTINDEX));
__ Ldr(Register(X30), MemoryOperand(sp, FRAME_SLOT_SIZE, MemoryOperand::AddrMode::POSTINDEX));
__ Ret();
}
@ -236,11 +239,11 @@ void AssemblerStubs::OptimizedCallOptimized(ExtendedAssembler *assembler)
// Construct frame
Register frameType(X5);
__ Mov(frameType, Immediate(static_cast<int64_t>(FrameType::OPTIMIZED_FRAME)));
__ Str(frameType, MemoryOperand(sp, -8, MemoryOperand::AddrMode::PREINDEX));
__ Str(frameType, MemoryOperand(sp, -FRAME_SLOT_SIZE, MemoryOperand::AddrMode::PREINDEX));
// callee save
Register tmp(X19);
__ Str(tmp, MemoryOperand(sp, -8, MemoryOperand::AddrMode::PREINDEX));
__ Str(tmp, MemoryOperand(sp, -FRAME_SLOT_SIZE, MemoryOperand::AddrMode::PREINDEX));
Register count(X5, W);
Register actualNumArgs(X2, W);
@ -269,25 +272,27 @@ void AssemblerStubs::OptimizedCallOptimized(ExtendedAssembler *assembler)
Label copyArgLoop;
__ Mov(saveNumArgs, expectedNumArgs);
__ Sub(count, count, Immediate(1));
// 3 : 3 means count * 8
__ Add(argVEnd, argVEnd, Operand(count, UXTW, 3));
__ Bind(&copyArgLoop);
__ Ldr(argValue, MemoryOperand(argVEnd, -8, MemoryOperand::AddrMode::POSTINDEX));
__ Ldr(argValue, MemoryOperand(argVEnd, -FRAME_SLOT_SIZE, MemoryOperand::AddrMode::POSTINDEX));
__ Sub(count, count, Immediate(1));
__ Str(argValue, MemoryOperand(sp, -8, MemoryOperand::AddrMode::PREINDEX));
__ Str(argValue, MemoryOperand(sp, -FRAME_SLOT_SIZE, MemoryOperand::AddrMode::PREINDEX));
__ B(Condition::NE, &copyArgLoop);
}
Register codeAddr(X3);
__ Bind(&invokeCompiledJSFunction);
__ Str(actualNumArgs, MemoryOperand(sp, -8, MemoryOperand::AddrMode::PREINDEX));
__ Str(actualNumArgs, MemoryOperand(sp, -FRAME_SLOT_SIZE, MemoryOperand::AddrMode::PREINDEX));
__ Blr(codeAddr);
// pop argv
// 3 : 3 means count * 8
__ Add(sp, sp, Operand(saveNumArgs, UXTW, 3));
__ Add(sp, sp, Immediate(8));
__ Add(sp, sp, Immediate(FRAME_SLOT_SIZE));
// callee restore
__ Ldr(saveNumArgs, MemoryOperand(sp, 8, MemoryOperand::AddrMode::POSTINDEX));
__ Ldr(saveNumArgs, MemoryOperand(sp, FRAME_SLOT_SIZE, MemoryOperand::AddrMode::POSTINDEX));
// deconstruct frame
__ Add(sp, sp, Immediate(8));
__ Add(sp, sp, Immediate(FRAME_SLOT_SIZE));
__ RestoreFpAndLr();
__ Ret();
}
@ -338,7 +343,7 @@ void AssemblerStubs::CallBuiltinTrampoline(ExtendedAssembler *assembler)
// construct leave frame and callee save
Register frameType(X1);
__ Mov(frameType, Immediate(static_cast<int64_t>(FrameType::LEAVE_FRAME)));
__ Stp(nativeFuncAddr, frameType, MemoryOperand(sp, -16, MemoryOperand::AddrMode::PREINDEX));
__ Stp(nativeFuncAddr, frameType, MemoryOperand(sp, -FRAME_SLOT_SIZE * 2, MemoryOperand::AddrMode::PREINDEX));
// load runtime trampoline address
__ Ldr(nativeFuncAddr, MemoryOperand(fp, GetStackArgOffSetToFp(0)));
@ -352,7 +357,7 @@ void AssemblerStubs::CallBuiltinTrampoline(ExtendedAssembler *assembler)
__ Str(thread, MemoryOperand(sp, 0));
Register argC(X0);
__ Ldr(argC, MemoryOperand(fp, GetStackArgOffSetToFp(1))); // argc
__ Sub(argC, argC, Immediate(3));
__ Sub(argC, argC, Immediate(NUM_MANDATORY_JSFUNC_ARGS));
__ Str(argC, MemoryOperand(sp, EcmaRuntimeCallInfo::GetNumArgsOffset()));
Register argV(X0);
__ Add(argV, fp, Immediate(GetStackArgOffSetToFp(2))); // argV
@ -365,7 +370,7 @@ void AssemblerStubs::CallBuiltinTrampoline(ExtendedAssembler *assembler)
__ Add(sp, sp, Immediate(sizeof(EcmaRuntimeCallInfo)));
// deconstruct leave frame and callee save register
__ Ldp(nativeFuncAddr, frameType, MemoryOperand(sp, 16, MemoryOperand::AddrMode::POSTINDEX));
__ Ldp(nativeFuncAddr, frameType, MemoryOperand(sp, 2 * FRAME_SLOT_SIZE, MemoryOperand::AddrMode::POSTINDEX));
__ RestoreFpAndLr();
__ Add(sp, sp, Immediate(8));
__ Ret();
@ -409,7 +414,7 @@ void AssemblerStubs::JSCall(ExtendedAssembler *assembler)
Register taggedValue(X2);
Label nonCallable;
Label notJSFunction;
__ Ldr(jsfunc, MemoryOperand(sp, 8));
__ Ldr(jsfunc, MemoryOperand(sp, FRAME_SLOT_SIZE));
__ Mov(taggedValue, JSTaggedValue::TAG_MASK);
__ Cmp(jsfunc, taggedValue);
__ B(Condition::HS, &nonCallable);
@ -425,8 +430,10 @@ void AssemblerStubs::JSCall(ExtendedAssembler *assembler)
__ Tbz(bitfield, JSHClass::CallableBit::START_BIT, &nonCallable);
Register jstype(X3, W);
__ And(jstype, bitfield, LogicalImmediate::Create(0xFF, 32));
__ And(jstype, bitfield, LogicalImmediate::Create(0xFF, RegWSize));
// 4 : 4 means JSType::JS_FUNCTION_BEGIN
__ Sub(jstype, jstype, Immediate(4));
// 9 : 9 means JSType::JS_FUNCTION_END - JSType::JS_FUNCTION_BEGIN + 1
__ Cmp(jstype, Immediate(9));
__ B(Condition::HS, &notJSFunction);
@ -436,6 +443,7 @@ void AssemblerStubs::JSCall(ExtendedAssembler *assembler)
Label callNativeMethod;
Label callOptimizedMethod;
__ Ldr(method, MemoryOperand(jsfunc, JSFunction::METHOD_OFFSET));
// 8 : 8 means actualArgc on the stack offset
__ Ldr(actualArgC, MemoryOperand(sp, 8));
__ Ldr(callField, MemoryOperand(method, JSMethod::GetCallFieldOffset(false)));
__ Tbnz(callField, JSMethod::IsNativeBit::START_BIT, &callNativeMethod);
@ -446,6 +454,7 @@ void AssemblerStubs::JSCall(ExtendedAssembler *assembler)
{
Register nativeFuncAddr(X4);
__ Ldr(nativeFuncAddr, MemoryOperand(method, JSMethod::GetNativePointerOffset()));
// -8 : -8 means sp increase step
__ Str(nativeFuncAddr, MemoryOperand(sp, -8, MemoryOperand::AddrMode::PREINDEX));
__ CallAssemblerStub(RTSTUB_ID(CallBuiltinTrampoline), true);
}
@ -461,9 +470,10 @@ void AssemblerStubs::JSCall(ExtendedAssembler *assembler)
__ Ldr(codeAddress, MemoryOperand(jsfunc, JSFunctionBase::CODE_ENTRY_OFFSET));
__ Lsr(callField, callField, JSMethod::NumArgsBits::START_BIT);
__ And(callField.W(), callField.W(),
LogicalImmediate::Create(JSMethod::NumArgsBits::Mask() >> JSMethod::NumArgsBits::START_BIT, 32));
LogicalImmediate::Create(JSMethod::NumArgsBits::Mask() >> JSMethod::NumArgsBits::START_BIT, RegWSize));
__ Add(expectedNumArgs, callField.W(), Immediate(NUM_MANDATORY_JSFUNC_ARGS));
__ Cmp(arg2.W(), expectedNumArgs);
// 8 : 8 means argV = sp + 8
__ Add(argV, sp, Immediate(8));
__ B(Condition::HI, &directCallCodeEntry);
__ CallAssemblerStub(RTSTUB_ID(OptimizedCallOptimized), true);
@ -490,14 +500,13 @@ void AssemblerStubs::JSCall(ExtendedAssembler *assembler)
Register frameType(X5);
Register fp(X29);
__ Mov(frameType, Immediate(static_cast<int64_t>(FrameType::OPTIMIZED_FRAME)));
__ Str(frameType, MemoryOperand(sp, -8, MemoryOperand::AddrMode::PREINDEX));
__ Str(frameType, MemoryOperand(sp, -FRAME_SLOT_SIZE, MemoryOperand::AddrMode::PREINDEX));
Register argVEnd(X4);
__ Add (argVEnd, fp, Immediate(16));
__ Add (argVEnd, fp, Immediate(GetStackArgOffSetToFp(0)));
// callee save
Register tmp(X19);
__ Str(tmp, MemoryOperand(sp, -8, MemoryOperand::AddrMode::PREINDEX));
Register boundLength(X2);
Register realArgC(X19, W);
Label copyBoundArgument;
@ -508,6 +517,7 @@ void AssemblerStubs::JSCall(ExtendedAssembler *assembler)
// get bound length
__ Ldr(boundLength, MemoryOperand(boundLength, TaggedArray::LENGTH_OFFSET));
__ Add(realArgC, boundLength.W(), actualArgC.W());
// 3 : 3 means *8
__ Add(argVEnd, argVEnd, Operand(actualArgC.W(), UXTW, 3));
__ Sub(actualArgC.W(), actualArgC.W(), Immediate(NUM_MANDATORY_JSFUNC_ARGS));
__ Cmp(actualArgC.W(), Immediate(0));
@ -515,8 +525,8 @@ void AssemblerStubs::JSCall(ExtendedAssembler *assembler)
__ Bind(&copyArgument);
{
Register argValue(X5);
__ Ldr(argValue, MemoryOperand(argVEnd, -8, MemoryOperand::AddrMode::POSTINDEX));
__ Str(argValue, MemoryOperand(sp, -8, MemoryOperand::AddrMode::PREINDEX));
__ Ldr(argValue, MemoryOperand(argVEnd, -FRAME_SLOT_SIZE, MemoryOperand::AddrMode::POSTINDEX));
__ Str(argValue, MemoryOperand(sp, -FRAME_SLOT_SIZE, MemoryOperand::AddrMode::PREINDEX));
__ Sub(actualArgC.W(), actualArgC.W(), Immediate(1));
__ B(Condition::HI, &copyArgument);
}
@ -529,12 +539,13 @@ void AssemblerStubs::JSCall(ExtendedAssembler *assembler)
__ Cmp(boundLength.W(), Immediate(0));
__ B(Condition::EQ, &pushCallTarget);
__ Sub(boundLength.W(), boundLength.W(), Immediate(1));
// 3 : 3 means 2^3 = 8
__ Add(boundArgs, boundArgs, Operand(boundLength.W(), UXTW, 3));
__ Bind(&copyBoundArgumentLoop);
{
Register boundargValue(X5);
__ Ldr(boundargValue, MemoryOperand(boundArgs, -8, MemoryOperand::AddrMode::POSTINDEX));
__ Str(boundargValue, MemoryOperand(sp, -8, MemoryOperand::AddrMode::PREINDEX));
__ Ldr(boundargValue, MemoryOperand(boundArgs, -FRAME_SLOT_SIZE, MemoryOperand::AddrMode::POSTINDEX));
__ Str(boundargValue, MemoryOperand(sp, -FRAME_SLOT_SIZE, MemoryOperand::AddrMode::PREINDEX));
__ Sub(boundLength.W(), boundLength.W(), Immediate(1));
__ B(Condition::HI, &copyBoundArgumentLoop);
}
@ -546,15 +557,16 @@ void AssemblerStubs::JSCall(ExtendedAssembler *assembler)
Register boundTarget(X7);
__ Ldr(thisObj, MemoryOperand(jsfunc, JSBoundFunction::BOUND_THIS_OFFSET));
__ Mov(newTarget, Immediate(JSTaggedValue::VALUE_UNDEFINED));
__ Stp(newTarget, thisObj, MemoryOperand(sp, -16, MemoryOperand::AddrMode::PREINDEX));
__ Stp(newTarget, thisObj, MemoryOperand(sp, -FRAME_SLOT_SIZE * 2, MemoryOperand::AddrMode::PREINDEX));
__ Ldr(boundTarget, MemoryOperand(jsfunc, JSBoundFunction::BOUND_TARGET_OFFSET));
__ Stp(realArgC.X(), boundTarget, MemoryOperand(sp, -16, MemoryOperand::AddrMode::PREINDEX));
__ Stp(realArgC.X(), boundTarget, MemoryOperand(sp, -FRAME_SLOT_SIZE * 2, MemoryOperand::AddrMode::PREINDEX));
}
__ CallAssemblerStub(RTSTUB_ID(JSCall), false);
__ Add(sp, sp, Immediate(8));
__ Add(sp, sp, Immediate(FRAME_SLOT_SIZE));
// 3 : 3 means 2^3 = 8
__ Add(sp, sp, Operand(realArgC, UXTW, 3));
__ Ldr(realArgC, MemoryOperand(sp, -16, MemoryOperand::AddrMode::POSTINDEX));
__ Add(sp, sp, Immediate(8));
__ Ldr(realArgC, MemoryOperand(sp, FRAME_SLOT_SIZE, MemoryOperand::AddrMode::POSTINDEX));
__ Add(sp, sp, Immediate(FRAME_SLOT_SIZE));
__ RestoreFpAndLr();
__ Ret();
}
@ -569,17 +581,20 @@ void AssemblerStubs::JSCall(ExtendedAssembler *assembler)
Register taggedMessageId(X5);
__ SaveFpAndLr();
__ Mov(frameType, Immediate(static_cast<int64_t>(FrameType::OPTIMIZED_FRAME)));
__ Mov(taggedMessageId,
__ Mov(taggedMessageId,
Immediate(JSTaggedValue(GET_MESSAGE_STRING_ID(NonCallable)).GetRawData()));
__ Stp(taggedMessageId, frameType, MemoryOperand(sp, -16, MemoryOperand::AddrMode::PREINDEX));
// 2 : 2 means pair
__ Stp(taggedMessageId, frameType, MemoryOperand(sp, -FRAME_SLOT_SIZE * 2, MemoryOperand::AddrMode::PREINDEX));
Register argC(X5);
Register runtimeId(X6);
__ Mov(argC, Immediate(1));
__ Mov(runtimeId, RTSTUB_ID(ThrowTypeError));
__ Stp(argC, runtimeId, MemoryOperand(sp, -16, MemoryOperand::AddrMode::PREINDEX));
// 2 : 2 means pair
__ Stp(argC, runtimeId, MemoryOperand(sp, -FRAME_SLOT_SIZE * 2, MemoryOperand::AddrMode::PREINDEX));
__ CallAssemblerStub(RTSTUB_ID(CallRuntime), false);
__ Mov(Register(X0), Immediate(JSTaggedValue::VALUE_EXCEPTION));
__ Add(sp, sp, Immediate(32));
// 4 : 4 means stack slot
__ Add(sp, sp, Immediate(4 * FRAME_SLOT_SIZE));
__ RestoreFpAndLr();
__ Ret();
}
@ -691,7 +706,7 @@ void AssemblerStubs::PushCallIRangeAndDispatchNative(ExtendedAssembler *assemble
void AssemblerStubs::PushCallArgsAndDispatchNative(ExtendedAssembler *assembler)
{
__ BindAssemblerStub(RTSTUB_ID(PushCallArgsAndDispatchNative));
__ Ret();
__ Ret();
}
void AssemblerStubs::ResumeRspAndDispatch(ExtendedAssembler *assembler)
@ -715,7 +730,6 @@ void AssemblerStubs::ResumeCaughtFrameAndDispatch(ExtendedAssembler *assembler)
void AssemblerStubs::GeneratorReEnterAsmInterp(ExtendedAssembler *assembler)
{
__ BindAssemblerStub(RTSTUB_ID(GeneratorReEnterAsmInterp));
__ Ret();
__ Ret();
}
} // panda::ecmascript::aarch64

View File

@ -22,6 +22,7 @@
namespace panda::ecmascript::aarch64 {
class AssemblerStubs {
public:
static const int FRAME_SLOT_SIZE = 8;
static inline int64_t GetStackArgOffSetToFp(unsigned argId)
{
// +--------------------------+
@ -38,7 +39,7 @@ public:
// | frameType | v
// +--------------------------+ ---
// 16 : 16 means arguments offset to fp
return 16 + argId * 8; // 8 : 8 means size of each args
return 16 + argId * FRAME_SLOT_SIZE;
}
static void CallRuntime(ExtendedAssembler *assembler);

View File

@ -251,7 +251,7 @@ void AssemblerStubsX64::OptimizedCallOptimized(ExtendedAssembler *assembler)
__ Ret();
}
// uint64_t CallBuiltinTrampoline(uintptr_t glue, uintptr_t codeAddress, uint32_t argc, ...);
// uint64_t CallBuiltinTrampoline(uintptr_t glue, uintptr_t codeAddress, uint32_t argc, ...)
// webkit_jscc calling convention: call runtime_id's runtime function (c-abi)
// Input:
// %rax - glue