Reason: add fast call and change calling convention to cc

Description: add fast call and change calling convention to cc
Issue:https://gitee.com/openharmony/arkcompiler_ets_runtime/issues/I6TTCX?from=project-issue

Signed-off-by: wupengyong <wupengyong@huawei.com>
Change-Id: I7c35619d73a4d961799232bc22b78b6513f65e71
This commit is contained in:
wupengyong 2023-05-18 19:13:51 +08:00
parent 39bf2e59aa
commit c0fa165ab0
69 changed files with 3153 additions and 665 deletions

View File

@ -116,9 +116,11 @@ ohos_source_set("libark_jsoptimizer_set") {
"trampoline/aarch64/asm_interpreter_call.cpp",
"trampoline/aarch64/common_call.cpp",
"trampoline/aarch64/optimized_call.cpp",
"trampoline/aarch64/optimized_fast_call.cpp",
"trampoline/x64/asm_interpreter_call.cpp",
"trampoline/x64/common_call.cpp",
"trampoline/x64/optimized_call.cpp",
"trampoline/x64/optimized_fast_call.cpp",
"ts_inline_lowering.cpp",
"ts_type_lowering.cpp",
"type.cpp",

View File

@ -106,7 +106,7 @@ void AnFileInfo::UpdateFuncEntries()
FuncEntryDes &funcDes = entries_[i];
funcDes.codeAddr_ += des.GetSecAddr(ElfSecName::TEXT);
if (funcDes.isMainFunc_) {
mainEntryMap_[funcDes.indexInKindOrMethodId_] = funcDes.codeAddr_;
mainEntryMap_[funcDes.indexInKindOrMethodId_] = std::make_pair(funcDes.codeAddr_, funcDes.isFastCall_);
#ifndef NDEBUG
LOG_COMPILER(INFO) << "AnFileInfo Load main method id: " << funcDes.indexInKindOrMethodId_
<< " code addr: " << reinterpret_cast<void *>(funcDes.codeAddr_);

View File

@ -36,13 +36,13 @@ public:
accumulateTotalSize(moduleDes.GetArkStackMapSize());
}
uintptr_t GetMainFuncEntry(uint32_t methodId) const
std::pair<uint64_t, bool> GetMainFuncEntry(uint32_t methodId) const
{
auto it = mainEntryMap_.find(methodId);
if (it == mainEntryMap_.end()) {
return 0;
return std::make_pair(0, false);
}
return static_cast<uintptr_t>(it->second);
return it->second;
}
void AlignTextSec()
@ -77,7 +77,7 @@ private:
void ParseFunctionEntrySection(ModuleSectionDes &moduleDes);
void UpdateFuncEntries();
uint64_t curTextSecOffset_ {0};
std::unordered_map<uint32_t, uint64_t> mainEntryMap_ {};
std::unordered_map<uint32_t, std::pair<uint64_t, bool>> mainEntryMap_ {};
bool isLoad_ {false};
friend class AnFileDataManager;

View File

@ -39,6 +39,7 @@ public:
uint64_t codeAddr_ {};
CallSignature::TargetKind kind_;
bool isMainFunc_ {};
bool isFastCall_ {};
uint32_t indexInKindOrMethodId_ {};
uint32_t moduleIndex_ {};
int fpDeltaPrevFrameSp_ {};
@ -108,7 +109,7 @@ public:
entryNum_ = n;
}
void AddEntry(CallSignature::TargetKind kind, bool isMainFunc, int indexInKind, uint64_t offset,
void AddEntry(CallSignature::TargetKind kind, bool isMainFunc, bool isFastCall, int indexInKind, uint64_t offset,
uint32_t moduleIndex, int delta, uint32_t size, CalleeRegAndOffsetVec info = {})
{
FuncEntryDes des;
@ -118,6 +119,7 @@ public:
}
des.kind_ = kind;
des.isMainFunc_ = isMainFunc;
des.isFastCall_ = isFastCall;
des.indexInKindOrMethodId_ = static_cast<uint32_t>(indexInKind);
des.codeAddr_ = offset;
des.moduleIndex_ = moduleIndex;

View File

@ -204,13 +204,16 @@ void AOTFileManager::SetAOTMainFuncEntry(JSHandle<JSFunction> mainFunc, const JS
const std::shared_ptr<AnFileInfo> anFileInfo = anFileDataManager->SafeGetAnFileInfo(anFileInfoIndex);
// get main func method
auto mainFuncMethodId = jsPandaFile->GetMainMethodIndex(entryPoint.data());
auto mainEntry = anFileInfo->GetMainFuncEntry(mainFuncMethodId);
uint64_t mainEntry;
bool isFastCall;
std::tie(mainEntry, isFastCall) = anFileInfo->GetMainFuncEntry(mainFuncMethodId);
MethodLiteral *mainMethod = jsPandaFile->FindMethodLiteral(mainFuncMethodId);
mainMethod->SetAotCodeBit(true);
mainMethod->SetNativeBit(false);
Method *method = mainFunc->GetCallTarget();
method->SetDeoptThreshold(vm_->GetJSOptions().GetDeoptThreshold());
method->SetCodeEntryAndMarkAOT(reinterpret_cast<uintptr_t>(mainEntry));
method->SetCodeEntryAndMarkAOT(static_cast<uintptr_t>(mainEntry));
method->SetIsFastCall(isFastCall);
#ifndef NDEBUG
PrintAOTEntry(jsPandaFile, method, mainEntry);
#endif
@ -231,6 +234,7 @@ void AOTFileManager::SetAOTFuncEntry(const JSPandaFile *jsPandaFile, Method *met
}
method->SetDeoptThreshold(vm_->GetJSOptions().GetDeoptThreshold());
method->SetCodeEntryAndMarkAOT(codeEntry);
method->SetIsFastCall(entry.isFastCall_);
}
void AOTFileManager::SetAOTFuncEntryForLiteral(const JSPandaFile *jsPandaFile, const TaggedArray *literal,

View File

@ -137,6 +137,16 @@ void ArgumentAccessor::CollectArgs()
if (args_.size() == 0) {
GateAccessor(circuit_).GetArgsOuts(args_);
std::reverse(args_.begin(), args_.end());
if (method_ == nullptr) {
return;
}
if (method_->IsFastCall() && args_.size() > 2) { // 2: mean have func and glue
GateRef actualArgcGate = circuit_->GetConstantGate(MachineType::I64, 0, GateType::NJSValue());
GateRef newTargetGate = circuit_->GetConstantGate(MachineType::I64, JSTaggedValue::VALUE_UNDEFINED,
GateType::UndefinedType());
args_.insert(args_.begin() + 1, actualArgcGate);
args_.insert(args_.begin() + 3, newTargetGate); // 3: newtarget index
}
}
}

View File

@ -32,6 +32,13 @@ enum class CommonArgIdx : uint8_t {
NUM_OF_ARGS,
};
// Fixed-argument slot layout for the optimized fast-call convention.
// NOTE(review): compared with CommonArgIdx, the actual-argc and new-target
// slots appear to be dropped, so declared arguments start right after
// THIS_OBJECT -- confirm against the fast-call trampolines.
enum class FastCallArgIdx : uint8_t {
GLUE = 0, // native glue pointer
FUNC, // callee function object
THIS_OBJECT, // receiver
NUM_OF_ARGS, // number of fixed slots; also the index of the first declared argument
};
enum class FrameArgIdx : uint8_t {
FUNC = 0,
NEW_TARGET,

View File

@ -262,14 +262,6 @@ bool AssemblerModule::IsJumpToCallCommonEntry(JSCallMode mode)
return false;
}
#define DECLARE_ASM_STUB_X64_GENERATE(name) \
void name##Stub::GenerateX64(Assembler *assembler) \
{ \
x64::ExtendedAssembler *assemblerX64 = static_cast<x64::ExtendedAssembler*>(assembler); \
x64::AssemblerStubsX64::name(assemblerX64); \
assemblerX64->Align16(); \
}
#define DECLARE_JSCALL_TRAMPOLINE_X64_GENERATE(name) \
void name##Stub::GenerateX64(Assembler *assembler) \
{ \
@ -278,6 +270,14 @@ void name##Stub::GenerateX64(Assembler *assembler)
assemblerX64->Align16(); \
}
// Expands to the X64 generator of a fast-call trampoline stub: downcasts
// the generic Assembler to the x64 extended assembler, emits the named
// OptimizedFastCall routine, then 16-byte aligns the end of the stub.
#define DECLARE_FAST_CALL_TRAMPOLINE_X64_GENERATE(name) \
void name##Stub::GenerateX64(Assembler *assembler) \
{ \
x64::ExtendedAssembler *assemblerX64 = static_cast<x64::ExtendedAssembler*>(assembler); \
x64::OptimizedFastCall::name(assemblerX64); \
assemblerX64->Align16(); \
}
#define DECLARE_ASM_INTERPRETER_TRAMPOLINE_X64_GENERATE(name) \
void name##Stub::GenerateX64(Assembler *assembler) \
{ \
@ -294,6 +294,13 @@ void name##Stub::GenerateAarch64(Assembler *assembler)
aarch64::OptimizedCall::name(assemblerAarch64); \
}
// AArch64 counterpart of the fast-call trampoline generator macro.
// NOTE(review): unlike the X64 variant there is no trailing Align16()
// call here -- verify whether aarch64 stubs are aligned elsewhere.
#define DECLARE_FAST_CALL_TRAMPOLINE_AARCH64_GENERATE(name) \
void name##Stub::GenerateAarch64(Assembler *assembler) \
{ \
aarch64::ExtendedAssembler *assemblerAarch64 = static_cast<aarch64::ExtendedAssembler*>(assembler); \
aarch64::OptimizedFastCall::name(assemblerAarch64); \
}
#define DECLARE_ASM_INTERPRETER_TRAMPOLINE_AARCH64_GENERATE(name) \
void name##Stub::GenerateAarch64(Assembler *assembler) \
{ \
@ -302,9 +309,15 @@ void name##Stub::GenerateAarch64(Assembler *assembler)
}
JS_CALL_TRAMPOLINE_LIST(DECLARE_JSCALL_TRAMPOLINE_X64_GENERATE)
FAST_CALL_TRAMPOLINE_LIST(DECLARE_FAST_CALL_TRAMPOLINE_X64_GENERATE)
ASM_INTERPRETER_TRAMPOLINE_LIST(DECLARE_ASM_INTERPRETER_TRAMPOLINE_X64_GENERATE)
JS_CALL_TRAMPOLINE_LIST(DECLARE_JSCALL_TRAMPOLINE_AARCH64_GENERATE)
FAST_CALL_TRAMPOLINE_LIST(DECLARE_FAST_CALL_TRAMPOLINE_AARCH64_GENERATE)
ASM_INTERPRETER_TRAMPOLINE_LIST(DECLARE_ASM_INTERPRETER_TRAMPOLINE_AARCH64_GENERATE)
#undef DECLARE_JSCALL_TRAMPOLINE_X64_GENERATE
#undef DECLARE_FAST_CALL_TRAMPOLINE_X64_GENERATE
#undef DECLARE_ASM_INTERPRETER_TRAMPOLINE_X64_GENERATE
#undef DECLARE_JSCALL_TRAMPOLINE_AARCH64_GENERATE
#undef DECLARE_FAST_CALL_TRAMPOLINE_AARCH64_GENERATE
#undef DECLARE_ASM_INTERPRETER_TRAMPOLINE_AARCH64_GENERATE
} // namespace panda::ecmascript::kunfu

View File

@ -513,12 +513,20 @@ void BytecodeCircuitBuilder::UpdateCFG()
void BytecodeCircuitBuilder::BuildCircuitArgs()
{
argAcc_.NewCommonArg(CommonArgIdx::GLUE, MachineType::I64, GateType::NJSValue());
argAcc_.NewCommonArg(CommonArgIdx::ACTUAL_ARGC, MachineType::I64, GateType::NJSValue());
auto funcIdx = static_cast<size_t>(CommonArgIdx::FUNC);
const size_t actualNumArgs = argAcc_.GetActualNumArgs();
// new actual argument gates
for (size_t argIdx = funcIdx; argIdx < actualNumArgs; argIdx++) {
argAcc_.NewArg(argIdx);
if (!method_->IsFastCall()) {
argAcc_.NewCommonArg(CommonArgIdx::ACTUAL_ARGC, MachineType::I64, GateType::NJSValue());
auto funcIdx = static_cast<size_t>(CommonArgIdx::FUNC);
const size_t actualNumArgs = argAcc_.GetActualNumArgs();
// new actual argument gates
for (size_t argIdx = funcIdx; argIdx < actualNumArgs; argIdx++) {
argAcc_.NewArg(argIdx);
}
} else {
auto funcIdx = static_cast<size_t>(FastCallArgIdx::FUNC);
size_t actualNumArgs = static_cast<size_t>(FastCallArgIdx::NUM_OF_ARGS) + method_->GetNumArgsWithCallField();
for (size_t argIdx = funcIdx; argIdx < actualNumArgs; argIdx++) {
argAcc_.NewArg(argIdx);
}
}
argAcc_.CollectArgs();
if (HasTypes()) {

View File

@ -68,7 +68,8 @@ void BytecodeInfoCollector::ProcessClasses()
auto &recordNames = bytecodeInfo_.GetRecordNames();
auto &methodPcInfos = bytecodeInfo_.GetMethodPcInfos();
std::vector<panda_file::File::EntityId> methodIndexes;
std::vector<panda_file::File::EntityId> classConstructIndexes;
for (const uint32_t index : classIndexes) {
panda_file::File::EntityId classId(index);
if (jsPandaFile_->IsExternal(classId)) {
@ -78,8 +79,10 @@ void BytecodeInfoCollector::ProcessClasses()
CString desc = utf::Mutf8AsCString(cda.GetDescriptor());
const CString recordName = JSPandaFile::ParseEntryPoint(desc);
cda.EnumerateMethods([this, methods, &methodIdx, pf, &processedInsns,
&recordNames, &methodPcInfos, &recordName] (panda_file::MethodDataAccessor &mda) {
&recordNames, &methodPcInfos, &recordName,
&methodIndexes, &classConstructIndexes] (panda_file::MethodDataAccessor &mda) {
auto methodId = mda.GetMethodId();
methodIndexes.emplace_back(methodId);
CollectFunctionTypeId(methodId);
// Generate all constpool
@ -110,7 +113,8 @@ void BytecodeInfoCollector::ProcessClasses()
auto it = processedInsns.find(insns);
if (it == processedInsns.end()) {
std::vector<std::string> classNameVec;
CollectMethodPcsFromBC(codeSize, insns, methodLiteral, classNameVec, recordName);
CollectMethodPcsFromBC(codeSize, insns, methodLiteral, classNameVec,
recordName, methodOffset, classConstructIndexes);
processedInsns[insns] = std::make_pair(methodPcInfos.size() - 1, methodOffset);
// collect className and literal offset for type infer
if (EnableCollectLiteralInfo()) {
@ -122,6 +126,15 @@ void BytecodeInfoCollector::ProcessClasses()
jsPandaFile_->SetMethodLiteralToMap(methodLiteral);
});
}
// class Construct need to use new target, can not fastcall
for (auto index : classConstructIndexes) {
MethodLiteral *method = jsPandaFile_->GetMethodLiteralByIndex(index.GetOffset());
if (method != nullptr) {
method->SetFunctionKind(FunctionKind::CLASS_CONSTRUCTOR);
method->SetIsFastCall(false);
bytecodeInfo_.ModifyMethodOffsetToCanFastCall(index.GetOffset(), false);
}
}
// Collect import(infer-needed) and export relationship among all records.
CollectRecordReferenceREL();
LOG_COMPILER(INFO) << "Total number of methods in file: "
@ -196,8 +209,8 @@ void BytecodeInfoCollector::StoreClassTypeOffset(const uint32_t typeOffset, std:
}
void BytecodeInfoCollector::CollectMethodPcsFromBC(const uint32_t insSz, const uint8_t *insArr,
const MethodLiteral *method, std::vector<std::string> &classNameVec,
const CString &recordName)
MethodLiteral *method, std::vector<std::string> &classNameVec, const CString &recordName,
uint32_t methodOffset, std::vector<panda_file::File::EntityId> &classConstructIndexes)
{
auto bcIns = BytecodeInst(insArr);
auto bcInsLast = bcIns.JumpTo(insSz);
@ -206,9 +219,14 @@ void BytecodeInfoCollector::CollectMethodPcsFromBC(const uint32_t insSz, const u
methodPcInfos.emplace_back(MethodPcInfo { {}, insSz });
auto &pcOffsets = methodPcInfos.back().pcOffsets;
const uint8_t *curPc = bcIns.GetAddress();
bool canFastCall = true;
while (bcIns.GetAddress() != bcInsLast.GetAddress()) {
CollectMethodInfoFromBC(bcIns, method, classNameVec, bcIndex);
bool fastCallFlag = true;
CollectMethodInfoFromBC(bcIns, method, classNameVec, bcIndex, classConstructIndexes, &fastCallFlag);
if (!fastCallFlag) {
canFastCall = false;
}
CollectModuleInfoFromBC(bcIns, method, recordName);
CollectConstantPoolIndexInfoFromBC(bcIns, method);
curPc = bcIns.GetAddress();
@ -217,6 +235,8 @@ void BytecodeInfoCollector::CollectMethodPcsFromBC(const uint32_t insSz, const u
pcOffsets.emplace_back(curPc);
bcIndex++;
}
bytecodeInfo_.SetMethodOffsetToCanFastCall(methodOffset, canFastCall);
method->SetIsFastCall(canFastCall);
}
void BytecodeInfoCollector::SetMethodPcInfoIndex(uint32_t methodOffset,
@ -337,8 +357,8 @@ void BytecodeInfoCollector::CollectInnerMethodsFromNewLiteral(const MethodLitera
}
void BytecodeInfoCollector::CollectMethodInfoFromBC(const BytecodeInstruction &bcIns,
const MethodLiteral *method, std::vector<std::string> &classNameVec,
int32_t bcIndex)
const MethodLiteral *method, std::vector<std::string> &classNameVec, int32_t bcIndex,
std::vector<panda_file::File::EntityId> &classConstructIndexes, bool *canFastCall)
{
if (!(bcIns.HasFlag(BytecodeInstruction::Flags::STRING_ID) &&
BytecodeInstruction::HasId(BytecodeInstruction::GetFormat(bcIns.GetOpcode()), 0))) {
@ -362,6 +382,7 @@ void BytecodeInfoCollector::CollectMethodInfoFromBC(const BytecodeInstruction &b
case BytecodeInstruction::Opcode::DEFINECLASSWITHBUFFER_IMM8_ID16_ID16_IMM16_V8:{
auto entityId = jsPandaFile_->ResolveMethodIndex(method->GetMethodId(),
(bcIns.GetId <BytecodeInstruction::Format::IMM8_ID16_ID16_IMM16_V8, 0>()).AsRawValue());
classConstructIndexes.emplace_back(entityId);
classNameVec.emplace_back(GetClassName(entityId));
classDefBCIndexes_.insert(bcIndex);
methodId = entityId.GetOffset();
@ -374,6 +395,7 @@ void BytecodeInfoCollector::CollectMethodInfoFromBC(const BytecodeInstruction &b
case BytecodeInstruction::Opcode::DEFINECLASSWITHBUFFER_IMM16_ID16_ID16_IMM16_V8: {
auto entityId = jsPandaFile_->ResolveMethodIndex(method->GetMethodId(),
(bcIns.GetId <BytecodeInstruction::Format::IMM16_ID16_ID16_IMM16_V8, 0>()).AsRawValue());
classConstructIndexes.emplace_back(entityId);
classNameVec.emplace_back(GetClassName(entityId));
classDefBCIndexes_.insert(bcIndex);
methodId = entityId.GetOffset();
@ -427,6 +449,19 @@ void BytecodeInfoCollector::CollectMethodInfoFromBC(const BytecodeInstruction &b
NewLexEnvWithSize(method, imm);
break;
}
case EcmaOpcode::RESUMEGENERATOR:
case EcmaOpcode::SUSPENDGENERATOR_V8:
case EcmaOpcode::SUPERCALLTHISRANGE_IMM8_IMM8_V8:
case EcmaOpcode::WIDE_SUPERCALLTHISRANGE_PREF_IMM16_V8:
case EcmaOpcode::SUPERCALLARROWRANGE_IMM8_IMM8_V8:
case EcmaOpcode::WIDE_SUPERCALLARROWRANGE_PREF_IMM16_V8:
case EcmaOpcode::SUPERCALLSPREAD_IMM8_V8:
case EcmaOpcode::GETUNMAPPEDARGS:
case EcmaOpcode::COPYRESTARGS_IMM8:
case EcmaOpcode::WIDE_COPYRESTARGS_PREF_IMM16: {
*canFastCall = false;
return;
}
default:
break;
}

View File

@ -589,6 +589,31 @@ public:
return recordToImportRecordsInfo_;
}
// Queries the recorded fast-call eligibility of the method at methodOffset.
// *isValid tells the caller whether the offset was recorded at all; when it
// is false the returned flag carries no information (always false).
bool IterateMethodOffsetToCanFastCall(uint32_t methodOffset, bool *isValid)
{
    const auto found = methodOffsetToCanFastCall_.find(methodOffset);
    const bool recorded = (found != methodOffsetToCanFastCall_.end());
    *isValid = recorded;
    return recorded ? found->second : false;
}
// Records the fast-call eligibility for the method at methodOffset.
// First writer wins: an already-recorded entry is left untouched.
void SetMethodOffsetToCanFastCall(uint32_t methodOffset, bool canFastCall)
{
    // emplace() is already a no-op when the key exists, so the previous
    // find()-then-emplace() pattern did a redundant second lookup.
    methodOffsetToCanFastCall_.emplace(methodOffset, canFastCall);
}
// Overwrites the recorded fast-call eligibility for the method at
// methodOffset (used e.g. to force class constructors to non-fast-call).
void ModifyMethodOffsetToCanFastCall(uint32_t methodOffset, bool canFastCall)
{
    // The original erased the key and then re-checked find(), which could
    // never succeed after erase(); the whole sequence is simply an
    // unconditional overwrite.
    methodOffsetToCanFastCall_[methodOffset] = canFastCall;
}
private:
std::vector<uint32_t> mainMethodIndexes_ {};
std::vector<CString> recordNames_ {};
@ -602,6 +627,7 @@ private:
std::unordered_map<uint32_t, uint32_t> functionTypeIdToMethodOffset_ {};
std::unordered_map<CString, ExportRecordInfo> recordNameToExportInfo_ {};
std::unordered_map<CString, ImportRecordInfo> recordToImportRecordsInfo_ {};
std::unordered_map<uint32_t, bool> methodOffsetToCanFastCall_ {};
};
class LexEnvManager {
@ -711,7 +737,8 @@ private:
const CString GetEntryFunName(const std::string_view &entryPoint) const;
void ProcessClasses();
void CollectMethodPcsFromBC(const uint32_t insSz, const uint8_t *insArr,
const MethodLiteral *method, std::vector<std::string> &classNameVec, const CString &recordName);
MethodLiteral *method, std::vector<std::string> &classNameVec, const CString &recordName,
uint32_t methodOffset, std::vector<panda_file::File::EntityId> &classConstructIndexes);
void SetMethodPcInfoIndex(uint32_t methodOffset, const std::pair<size_t, uint32_t> &processedMethodInfo);
void CollectInnerMethods(const MethodLiteral *method, uint32_t innerMethodOffset);
void CollectInnerMethods(uint32_t methodId, uint32_t innerMethodOffset);
@ -719,7 +746,9 @@ private:
void NewLexEnvWithSize(const MethodLiteral *method, uint64_t numOfLexVars);
void CollectInnerMethodsFromNewLiteral(const MethodLiteral *method, panda_file::File::EntityId literalId);
void CollectMethodInfoFromBC(const BytecodeInstruction &bcIns, const MethodLiteral *method,
std::vector<std::string> &classNameVec, int32_t bcIndex);
std::vector<std::string> &classNameVec, int32_t bcIndex,
std::vector<panda_file::File::EntityId> &classConstructIndexes,
bool *canFastCall);
void CollectModuleInfoFromBC(const BytecodeInstruction &bcIns, const MethodLiteral *method,
const CString &recordName);
void CollectConstantPoolIndexInfoFromBC(const BytecodeInstruction &bcIns, const MethodLiteral *method);

View File

@ -883,17 +883,37 @@ DEF_CALL_SIGNATURE(CallRuntimeWithArgv)
callSign->SetCallConv(CallSignature::CallConv::CCallConv);
}
DEF_CALL_SIGNATURE(OptimizedCallOptimized)
DEF_CALL_SIGNATURE(OptimizedCallAndPushUndefined)
{
/* 4 : 4 input parameters */
CallSignature runtimeCallTrampoline("OptimizedCallOptimized", 0, 4,
/* 5 : 5 input parameters */
CallSignature optimizedCallAndPushUndefined("OptimizedCallAndPushUndefined", 0, 5,
ArgumentsOrder::DEFAULT_ORDER, VariableType::JS_ANY());
*callSign = runtimeCallTrampoline;
std::array<VariableType, 4> params = { /* 4 : 4 input parameters */
VariableType::NATIVE_POINTER(),
VariableType::INT64(),
VariableType::INT64(),
VariableType::NATIVE_POINTER(),
*callSign = optimizedCallAndPushUndefined;
std::array<VariableType, 5> params = { /* 5 : 5 input parameters */
VariableType::NATIVE_POINTER(), // glue
VariableType::INT64(), // actual argC
VariableType::JS_ANY(), // call target
VariableType::JS_ANY(), // new target
VariableType::JS_ANY(), // thisobj
};
callSign->SetVariadicArgs(true);
callSign->SetParameters(params.data());
callSign->SetTargetKind(CallSignature::TargetKind::RUNTIME_STUB_NO_GC);
callSign->SetCallConv(CallSignature::CallConv::WebKitJSCallConv);
}
DEF_CALL_SIGNATURE(OptimizedFastCallAndPushUndefined)
{
/* 5 : 5 input parameters */
CallSignature optimizedFastCallAndPushUndefined("OptimizedFastCallAndPushUndefined", 0, 5,
ArgumentsOrder::DEFAULT_ORDER, VariableType::JS_ANY());
*callSign = optimizedFastCallAndPushUndefined;
std::array<VariableType, 5> params = { /* 5 : 5 input parameters */
VariableType::NATIVE_POINTER(), // glue
VariableType::INT64(), // actual argC
VariableType::JS_ANY(), // call target
VariableType::JS_ANY(), // new target
VariableType::JS_ANY(), // thisobj
};
callSign->SetVariadicArgs(true);
callSign->SetParameters(params.data());
@ -920,10 +940,10 @@ DEF_CALL_SIGNATURE(JSCall)
callSign->SetTargetKind(CallSignature::TargetKind::RUNTIME_STUB_NO_GC);
}
DEF_CALL_SIGNATURE(JSAotCall)
DEF_CALL_SIGNATURE(JSOptimizedCall)
{
// 6 : 6 input parameters
CallSignature jSCall("JSAotCall", 0, 5,
CallSignature jSCall("JSOptimizedCall", 0, 5,
ArgumentsOrder::DEFAULT_ORDER, VariableType::JS_ANY());
*callSign = jSCall;
std::array<VariableType, 5> params = { // 5 : 5 input parameters
@ -936,7 +956,24 @@ DEF_CALL_SIGNATURE(JSAotCall)
callSign->SetVariadicArgs(true);
callSign->SetParameters(params.data());
callSign->SetCallConv(CallSignature::CallConv::WebKitJSCallConv);
callSign->SetTargetKind(CallSignature::TargetKind::AOT_STUB);
callSign->SetTargetKind(CallSignature::TargetKind::OPTIMIZED_STUB);
}
// Call signature of the AOT fast-call convention: no actual-argc and no
// new-target slot -- only glue, the callee and "this" precede declared args.
DEF_CALL_SIGNATURE(JSOptimizedFastCall)
{
    // 3 : 3 input parameters
    CallSignature jSCall("JSOptimizedFastCall", 0, 3,
        ArgumentsOrder::DEFAULT_ORDER, VariableType::JS_ANY());
    *callSign = jSCall;
    // Fixed: the array previously declared 5 slots for a 3-parameter
    // signature (two value-initialized leftovers) and the comments claimed
    // 6/5 parameters.
    std::array<VariableType, 3> params = { // 3 : 3 input parameters
        VariableType::NATIVE_POINTER(), // glue
        VariableType::JS_ANY(),         // call target
        VariableType::JS_ANY(),         // thisobj
    };
    callSign->SetVariadicArgs(true);
    callSign->SetParameters(params.data());
    callSign->SetCallConv(CallSignature::CallConv::CCallConv);
    callSign->SetTargetKind(CallSignature::TargetKind::OPTIMIZED_FAST_CALL_STUB);
}
DEF_CALL_SIGNATURE(JSCallNew)
@ -992,6 +1029,23 @@ DEF_CALL_SIGNATURE(JSFunctionEntry)
callSign->SetCallConv(CallSignature::CallConv::CCallConv);
}
// Entry trampoline signature for invoking an AOT fast-call function from
// native code: (glue, argc, argv, prevFp) -> JS value, C calling convention.
DEF_CALL_SIGNATURE(OptimizedFastCallEntry)
{
    constexpr size_t PARAM_COUNT = 4; // glue, argc, argv, prevFp
    CallSignature entrySign("OptimizedFastCallEntry", 0, PARAM_COUNT,
        ArgumentsOrder::DEFAULT_ORDER, VariableType::JS_ANY());
    *callSign = entrySign;
    std::array<VariableType, PARAM_COUNT> params = {
        VariableType::NATIVE_POINTER(), // glue
        VariableType::INT64(),          // argc
        VariableType::NATIVE_POINTER(), // argv
        VariableType::NATIVE_POINTER(), // prevFp
    };
    callSign->SetParameters(params.data());
    callSign->SetTargetKind(CallSignature::TargetKind::RUNTIME_STUB_NO_GC);
    callSign->SetCallConv(CallSignature::CallConv::CCallConv);
}
DEF_CALL_SIGNATURE(ResumeRspAndDispatch)
{
// 8 : 8 input parameters
@ -1230,26 +1284,83 @@ DEF_CALL_SIGNATURE(JSCallWithArgV)
callSign->SetCallConv(CallSignature::CallConv::CCallConv);
}
DEF_CALL_SIGNATURE(JSCallNewWithArgV)
DEF_CALL_SIGNATURE(JSFastCallWithArgV)
{
// 6 : 6 input parameters
CallSignature jsCallNewWithArgV("JSCallNewWithArgV", 0, 6,
// 4 : 4 input parameters
CallSignature jSFastCallWithArgV("JSFastCallWithArgV", 0, 4,
ArgumentsOrder::DEFAULT_ORDER, VariableType::JS_ANY());
*callSign = jsCallNewWithArgV;
// 6 : 6 input parameters
std::array<VariableType, 6> params = {
*callSign = jSFastCallWithArgV;
// 4 : 4 input parameters
std::array<VariableType, 4> params = {
VariableType::NATIVE_POINTER(), // glue
VariableType::JS_ANY(), // jsfunc
VariableType::JS_ANY(), // this
VariableType::INT64(), // actualNumArgs
};
callSign->SetVariadicArgs(true);
callSign->SetParameters(params.data());
callSign->SetTargetKind(CallSignature::TargetKind::RUNTIME_STUB_NO_GC);
callSign->SetCallConv(CallSignature::CallConv::CCallConv);
}
// Trampoline signature for a fast call driven by an argv buffer; judging by
// the name the stub pushes undefined for missing declared arguments --
// confirm against the trampoline implementation.
// Fixed layout: (glue, jsfunc, this, actualNumArgs) plus variadic argv.
DEF_CALL_SIGNATURE(JSFastCallWithArgVAndPushUndefined)
{
    constexpr size_t PARAM_COUNT = 4; // glue, jsfunc, this, actualNumArgs
    CallSignature fastCallWithArgV("JSFastCallWithArgVAndPushUndefined", 0, PARAM_COUNT,
        ArgumentsOrder::DEFAULT_ORDER, VariableType::JS_ANY());
    *callSign = fastCallWithArgV;
    std::array<VariableType, PARAM_COUNT> params = {
        VariableType::NATIVE_POINTER(), // glue
        VariableType::JS_ANY(),         // jsfunc
        VariableType::JS_ANY(),         // this
        VariableType::INT64(),          // actualNumArgs
    };
    callSign->SetVariadicArgs(true);
    callSign->SetParameters(params.data());
    callSign->SetTargetKind(CallSignature::TargetKind::RUNTIME_STUB_NO_GC);
    callSign->SetCallConv(CallSignature::CallConv::CCallConv);
}
DEF_CALL_SIGNATURE(JSCallWithArgVAndPushUndefined)
{
// 5 : 5 input parameters
CallSignature jSCallWithArgVAndPushUndefined("JSCallWithArgVAndPushUndefined", 0, 5,
ArgumentsOrder::DEFAULT_ORDER, VariableType::JS_ANY());
*callSign = jSCallWithArgVAndPushUndefined;
// 5 : 5 input parameters
std::array<VariableType, 5> params = {
VariableType::NATIVE_POINTER(), // glue
VariableType::INT64(), // actualNumArgs
VariableType::JS_ANY(), // jsfunc
VariableType::JS_ANY(), // newTarget
VariableType::JS_ANY(), // this
VariableType::NATIVE_POINTER(), // argV
};
callSign->SetVariadicArgs(true);
callSign->SetParameters(params.data());
callSign->SetTargetKind(CallSignature::TargetKind::RUNTIME_STUB_NO_GC);
callSign->SetCallConv(CallSignature::CallConv::CCallConv);
}
// Signature for calling an optimized (AOT) function through the normal
// convention: (glue, actual argc, call target, new target, this) plus
// variadic actual arguments, WebKit JS calling convention.
DEF_CALL_SIGNATURE(CallOptimized)
{
// 5 : 5 input parameters
CallSignature jSCall("CallOptimized", 0, 5,
ArgumentsOrder::DEFAULT_ORDER, VariableType::JS_ANY());
*callSign = jSCall;
std::array<VariableType, 5> params = { // 5 : 5 input parameters
VariableType::NATIVE_POINTER(), // glue
VariableType::INT64(), // actual argC
VariableType::JS_ANY(), // call target
VariableType::JS_ANY(), // new target
VariableType::JS_ANY(), // thisobj
};
callSign->SetVariadicArgs(true);
callSign->SetParameters(params.data());
callSign->SetCallConv(CallSignature::CallConv::WebKitJSCallConv);
callSign->SetTargetKind(CallSignature::TargetKind::RUNTIME_STUB_NO_GC);
}
DEF_CALL_SIGNATURE(DebugPrint)
{
// 1 : 1 input parameters
@ -1660,6 +1771,29 @@ DEF_CALL_SIGNATURE(JsProxyCallInternal)
callSign->SetCallConv(CallSignature::CallConv::CCallConv);
}
// Signature of the internal stub that dispatches a bound-function call:
// (glue, actual argc, callTarget, argv, this, new). Non-variadic, marked
// as a GC-leaf tail call with the C calling convention.
DEF_CALL_SIGNATURE(JsBoundCallInternal)
{
// 6 : 6 input parameters
CallSignature boundCallInternal("JsBoundCallInternal", 0, 6,
ArgumentsOrder::DEFAULT_ORDER, VariableType::JS_POINTER());
*callSign = boundCallInternal;
// 6 : 6 input parameters
std::array<VariableType, 6> params = {
VariableType::NATIVE_POINTER(), // glue
VariableType::INT64(), // actual argC
VariableType::JS_POINTER(), // callTarget
VariableType::NATIVE_POINTER(), // argv
VariableType::JS_POINTER(), // this
VariableType::JS_POINTER(), // new
};
callSign->SetVariadicArgs(false);
callSign->SetParameters(params.data());
callSign->SetTailCall(true);
callSign->SetGCLeafFunction(true);
callSign->SetTargetKind(CallSignature::TargetKind::COMMON_STUB);
callSign->SetCallConv(CallSignature::CallConv::CCallConv);
}
DEF_CALL_SIGNATURE(CreateArrayFromList)
{
// 3 : 3 input parameters

View File

@ -41,7 +41,8 @@ public:
RUNTIME_STUB,
RUNTIME_STUB_VARARGS,
RUNTIME_STUB_NO_GC,
AOT_STUB,
OPTIMIZED_STUB,
OPTIMIZED_FAST_CALL_STUB,
DEOPT_STUB,
BYTECODE_HANDLER,
BYTECODE_DEBUGGER_HANDLER,
@ -139,9 +140,14 @@ public:
return (GetTargetKind() == TargetKind::RUNTIME_STUB_NO_GC);
}
bool IsAotStub() const
bool IsOptimizedStub() const
{
return (GetTargetKind() == TargetKind::AOT_STUB);
return (GetTargetKind() == TargetKind::OPTIMIZED_STUB);
}
// True when this signature targets the optimized fast-call stub kind.
bool IsOptimizedFastCallStub() const
{
    return GetTargetKind() == TargetKind::OPTIMIZED_FAST_CALL_STUB;
}
bool IsBCDebuggerStub() const
@ -377,7 +383,8 @@ private:
V(AsmInterpreterEntry) \
V(GeneratorReEnterAsmInterp) \
V(CallRuntimeWithArgv) \
V(OptimizedCallOptimized) \
V(OptimizedCallAndPushUndefined) \
V(OptimizedFastCallAndPushUndefined) \
V(PushCallArg0AndDispatch) \
V(PushCallArgsAndDispatchNative) \
V(PushCallArg1AndDispatch) \
@ -396,6 +403,9 @@ private:
V(CallSetter) \
V(CallContainersArgs3) \
V(JSCallWithArgV) \
V(JSFastCallWithArgV) \
V(JSFastCallWithArgVAndPushUndefined) \
V(JSCallWithArgVAndPushUndefined) \
V(ResumeRspAndDispatch) \
V(ResumeRspAndReturn) \
V(ResumeCaughtFrameAndDispatch) \
@ -428,15 +438,18 @@ private:
V(CallThisRange) \
V(CallRange) \
V(JSCall) \
V(JSAotCall) \
V(JSOptimizedCall) \
V(JSOptimizedFastCall) \
V(JSFunctionEntry) \
V(OptimizedFastCallEntry) \
V(JSProxyCallInternalWithArgV) \
V(CreateArrayFromList) \
V(JSObjectGetMethod) \
V(JsProxyCallInternal) \
V(JsBoundCallInternal) \
V(DeoptHandlerAsm) \
V(JSCallNew) \
V(JSCallNewWithArgV) \
V(CallOptimized) \
V(TimeClip) \
V(SetDateValues) \
V(CallReturnWithArgv) \

View File

@ -191,7 +191,8 @@ public:
bool IsOptimizedJSFunctionFrame() const
{
return frameType_ == panda::ecmascript::FrameType::OPTIMIZED_JS_FUNCTION_FRAME;
return frameType_ == panda::ecmascript::FrameType::OPTIMIZED_JS_FUNCTION_FRAME
|| frameType_ == FrameType::OPTIMIZED_JS_FAST_CALL_FUNCTION_FRAME;
}
bool GetDebugInfo(GateRef g, size_t &index) const;

View File

@ -583,6 +583,14 @@ inline GateRef CircuitBuilder::HasAotCode(GateRef method)
Int64(0));
}
// Builds gates that test whether BOTH flags covered by AOT_FASTCALL_BITS
// (shifted to the IsAotCodeBit position) are set in the method's call
// field: mask the loaded 64-bit field and compare it against the mask
// itself, so the result is true only when every masked bit is 1.
inline GateRef CircuitBuilder::HasAotCodeAndFastCall(GateRef method)
{
GateRef callFieldOffset = IntPtr(Method::CALL_FIELD_OFFSET);
GateRef callfield = Load(VariableType::INT64(), method, callFieldOffset);
return Int64Equal(Int64And(callfield, Int64(Method::AOT_FASTCALL_BITS << MethodLiteral::IsAotCodeBit::START_BIT)),
Int64(Method::AOT_FASTCALL_BITS << MethodLiteral::IsAotCodeBit::START_BIT));
}
inline GateRef CircuitBuilder::IsJSFunction(GateRef obj)
{
GateRef objectType = GetObjectType(LoadHClass(obj));
@ -679,6 +687,18 @@ GateRef CircuitBuilder::IsClassConstructor(GateRef object)
Int32(0));
}
// Emits gates testing the "constructor" flag inside the object's hclass
// bitfield: shift the flag down to bit 0, mask it, compare against zero.
GateRef CircuitBuilder::IsConstructor(GateRef object)
{
    GateRef hclass = LoadHClass(object);
    GateRef bits = Load(VariableType::INT32(), hclass, IntPtr(JSHClass::BIT_FIELD_OFFSET));
    GateRef shifted = Int32LSR(bits, Int32(JSHClass::ConstructorBit::START_BIT));
    GateRef mask = Int32((1LU << JSHClass::ConstructorBit::SIZE) - 1);
    return Int32NotEqual(Int32And(shifted, mask), Int32(0));
}
GateRef CircuitBuilder::IsClassPrototype(GateRef object)
{
GateRef hClass = LoadHClass(object);
@ -702,6 +722,15 @@ GateRef CircuitBuilder::IsExtensible(GateRef object)
Int32(0));
}
// Emits gates extracting the declared argument count (NumArgsBits) from
// the method's 64-bit call field.
GateRef CircuitBuilder::GetExpectedNumOfArgs(GateRef method)
{
    GateRef callField = Load(VariableType::INT64(), method, IntPtr(Method::CALL_FIELD_OFFSET));
    GateRef shifted = Int64LSR(callField, Int64(MethodLiteral::NumArgsBits::START_BIT));
    return Int64And(shifted, Int64((1LU << MethodLiteral::NumArgsBits::SIZE) - 1));
}
GateRef CircuitBuilder::TaggedObjectIsEcmaObject(GateRef obj)
{
GateRef objectType = GetObjectType(LoadHClass(obj));

View File

@ -409,6 +409,19 @@ GateRef CircuitBuilder::JSCallTargetTypeCheck(GateType type, GateRef func, GateR
return ret;
}
// Inserts a deoptimizing type-check gate for a fast-call target: verifies
// at runtime that func matches the profiled type/methodIndex, anchored to
// the nearest frame state so a failed check can deopt. The new gate is
// threaded into the current label's control and depend chains.
GateRef CircuitBuilder::JSFastCallTargetTypeCheck(GateType type, GateRef func, GateRef methodIndex)
{
auto currentLabel = env_->GetCurrentLabel();
auto currentControl = currentLabel->GetControl();
auto currentDepend = currentLabel->GetDepend();
auto frameState = acc_.FindNearestFrameState(currentDepend);
GateRef ret = GetCircuit()->NewGate(circuit_->JSFastCallTargetTypeCheck(static_cast<size_t>(type.Value())),
MachineType::I1, {currentControl, currentDepend, func, methodIndex, frameState}, GateType::NJSValue());
// The check becomes both the new control and depend of the current label.
currentLabel->SetControl(ret);
currentLabel->SetDepend(ret);
return ret;
}
GateRef CircuitBuilder::JSCallThisTargetTypeCheck(GateType type, GateRef func)
{
auto currentLabel = env_->GetCurrentLabel();
@ -422,6 +435,19 @@ GateRef CircuitBuilder::JSCallThisTargetTypeCheck(GateType type, GateRef func)
return ret;
}
// Same as JSFastCallTargetTypeCheck but for this-call sites: no
// methodIndex input, only the callee gate is checked against the profiled
// type, anchored to the nearest frame state for deoptimization.
GateRef CircuitBuilder::JSFastCallThisTargetTypeCheck(GateType type, GateRef func)
{
auto currentLabel = env_->GetCurrentLabel();
auto currentControl = currentLabel->GetControl();
auto currentDepend = currentLabel->GetDepend();
auto frameState = acc_.FindNearestFrameState(currentDepend);
GateRef ret = GetCircuit()->NewGate(circuit_->JSFastCallThisTargetTypeCheck(static_cast<size_t>(type.Value())),
MachineType::I1, {currentControl, currentDepend, func, frameState}, GateType::NJSValue());
// Thread the check into the label's control and depend chains.
currentLabel->SetControl(ret);
currentLabel->SetDepend(ret);
return ret;
}
GateRef CircuitBuilder::DeoptCheck(GateRef condition, GateRef frameState, DeoptType type)
{
std::string comment = Deoptimizier::DisplayItems(type);
@ -725,6 +751,42 @@ GateRef CircuitBuilder::CallNGCRuntime(GateRef glue, int index, GateRef depend,
return result;
}
// Emits a call gate targeting compiled code through the fast-call
// convention. depend defaults to the current label's depend when the
// caller passes Gate::InvalidGateRef; hirGate is only attached when
// building inside an optimized JS function frame.
GateRef CircuitBuilder::FastCallOptimized(GateRef glue, GateRef code, GateRef depend, const std::vector<GateRef> &args,
    GateRef hirGate)
{
    const CallSignature *sign = RuntimeStubCSigns::GetOptimizedFastCallSign();
    ASSERT(sign->IsOptimizedFastCallStub());
    if (depend == Gate::InvalidGateRef) {
        depend = GetCurrentLabel()->GetDepend();
    }
    GateRef attachedHir = Circuit::NullGate();
    if (GetCircuit()->IsOptimizedJSFunctionFrame()) {
        ASSERT(hirGate != Circuit::NullGate());
        attachedHir = hirGate;
    }
    return Call(sign, glue, code, depend, args, attachedHir);
}
// Emits a call gate targeting compiled code through the normal optimized
// convention (mirror of FastCallOptimized). An invalid depend falls back
// to the current label's depend; the hir gate is only forwarded when
// building inside an optimized JS function frame.
GateRef CircuitBuilder::CallOptimized(GateRef glue, GateRef code, GateRef depend, const std::vector<GateRef> &args,
GateRef hirGate)
{
const CallSignature *cs = RuntimeStubCSigns::GetOptimizedCallSign();
ASSERT(cs->IsOptimizedStub());
auto label = GetCurrentLabel();
if (depend == Gate::InvalidGateRef) {
depend = label->GetDepend();
}
GateRef filteredHirGate = Circuit::NullGate();
if (GetCircuit()->IsOptimizedJSFunctionFrame()) {
ASSERT(hirGate != Circuit::NullGate());
filteredHirGate = hirGate;
}
GateRef result = Call(cs, glue, code, depend, args, filteredHirGate);
return result;
}
GateRef CircuitBuilder::CallStub(GateRef glue, GateRef hirGate, int index, const std::vector<GateRef> &args,
const char* comment)
{
@ -793,8 +855,10 @@ GateRef CircuitBuilder::Call(const CallSignature* cs, GateRef glue, GateRef targ
meta = circuit_->BuiltinsCallWithArgv(numValuesIn);
} else if (cs->IsRuntimeNGCStub()) {
meta = circuit_->NoGcRuntimeCall(numValuesIn);
} else if (cs->IsAotStub()) {
meta = circuit_->AotCall(numValuesIn);
} else if (cs->IsOptimizedStub()) {
meta = circuit_->CallOptimized(numValuesIn);
} else if (cs->IsOptimizedFastCallStub()) {
meta = circuit_->FastCallOptimized(numValuesIn);
} else {
LOG_ECMA(FATAL) << "unknown call operator";
UNREACHABLE();
@ -980,7 +1044,7 @@ GateRef CircuitBuilder::Construct(GateRef hirGate, std::vector<GateRef> args)
return callGate;
}
GateRef CircuitBuilder::TypedAotCall(GateRef hirGate, std::vector<GateRef> args)
GateRef CircuitBuilder::TypedCall(GateRef hirGate, std::vector<GateRef> args)
{
ASSERT(acc_.GetOpCode(hirGate) == OpCode::JS_BYTECODE);
auto currentLabel = env_->GetCurrentLabel();
@ -991,7 +1055,25 @@ GateRef CircuitBuilder::TypedAotCall(GateRef hirGate, std::vector<GateRef> args)
ASSERT(pcOffset != 0);
args.insert(args.begin(), currentDepend);
args.insert(args.begin(), currentControl);
auto callGate = GetCircuit()->NewGate(circuit_->TypedAotCall(bitfield, pcOffset), MachineType::I64,
auto callGate = GetCircuit()->NewGate(circuit_->TypedCall(bitfield, pcOffset), MachineType::I64,
args.size(), args.data(), GateType::AnyType());
currentLabel->SetControl(callGate);
currentLabel->SetDepend(callGate);
return callGate;
}
GateRef CircuitBuilder::TypedFastCall(GateRef hirGate, std::vector<GateRef> args)
{
ASSERT(acc_.GetOpCode(hirGate) == OpCode::JS_BYTECODE);
auto currentLabel = env_->GetCurrentLabel();
auto currentControl = currentLabel->GetControl();
auto currentDepend = currentLabel->GetDepend();
uint64_t bitfield = args.size();
uint64_t pcOffset = acc_.TryGetPcOffset(hirGate);
ASSERT(pcOffset != 0);
args.insert(args.begin(), currentDepend);
args.insert(args.begin(), currentControl);
auto callGate = GetCircuit()->NewGate(circuit_->TypedFastCall(bitfield, pcOffset), MachineType::I64,
args.size(), args.data(), GateType::AnyType());
currentLabel->SetControl(callGate);
currentLabel->SetDepend(callGate);

View File

@ -254,7 +254,9 @@ public:
GateRef TryPrimitiveTypeCheck(GateType type, GateRef gate);
GateRef CallTargetCheck(GateRef function, GateRef id, GateRef param, const char* comment = nullptr);
GateRef JSCallTargetTypeCheck(GateType type, GateRef func, GateRef methodIndex);
GateRef JSFastCallTargetTypeCheck(GateType type, GateRef func, GateRef methodIndex);
GateRef JSCallThisTargetTypeCheck(GateType type, GateRef func);
GateRef JSFastCallThisTargetTypeCheck(GateType type, GateRef func);
GateRef DeoptCheck(GateRef condition, GateRef frameState, DeoptType type);
GateRef TypedCallOperator(GateRef hirGate, MachineType type, const std::initializer_list<GateRef>& args);
inline GateRef TypedCallBuiltin(GateRef hirGate, GateRef x, BuiltinsStubCSigns::ID id);
@ -351,6 +353,11 @@ public:
const char* comment = nullptr);
GateRef CallNGCRuntime(GateRef glue, int index, GateRef depend, const std::vector<GateRef> &args,
GateRef hirGate, const char* comment = nullptr);
GateRef FastCallOptimized(GateRef glue, GateRef code, GateRef depend, const std::vector<GateRef> &args,
GateRef hirGate);
GateRef CallOptimized(GateRef glue, GateRef code, GateRef depend, const std::vector<GateRef> &args,
GateRef hirGate);
GateRef CallStub(GateRef glue, GateRef hirGate, int index, const std::vector<GateRef> &args,
const char* comment = nullptr);
GateRef CallBuiltinRuntime(GateRef glue, GateRef depend, const std::vector<GateRef> &args,
@ -492,7 +499,8 @@ public:
GateRef LoadArrayLength(GateRef array);
GateRef HeapAlloc(GateRef initialHClass, GateType type, RegionSpaceFlag flag);
GateRef Construct(GateRef hirGate, std::vector<GateRef> args);
GateRef TypedAotCall(GateRef hirGate, std::vector<GateRef> args);
GateRef TypedCall(GateRef hirGate, std::vector<GateRef> args);
GateRef TypedFastCall(GateRef hirGate, std::vector<GateRef> args);
GateRef CallGetter(GateRef hirGate, GateRef receiver, GateRef propertyLookupResult, const char* comment = nullptr);
GateRef CallSetter(GateRef hirGate, GateRef receiver, GateRef propertyLookupResult,
GateRef value, const char* comment = nullptr);
@ -502,6 +510,7 @@ public:
// Object Operations
inline GateRef LoadHClass(GateRef object);
inline GateRef HasAotCode(GateRef method);
inline GateRef HasAotCodeAndFastCall(GateRef method);
inline GateRef IsJSFunction(GateRef obj);
inline GateRef IsDictionaryMode(GateRef object);
inline void StoreHClass(GateRef glue, GateRef object, GateRef hClass);
@ -515,8 +524,10 @@ public:
inline GateRef DoubleIsINF(GateRef x);
inline GateRef IsDictionaryElement(GateRef hClass);
inline GateRef IsClassConstructor(GateRef object);
inline GateRef IsConstructor(GateRef object);
inline GateRef IsClassPrototype(GateRef object);
inline GateRef IsExtensible(GateRef object);
inline GateRef GetExpectedNumOfArgs(GateRef method);
inline GateRef TaggedObjectIsEcmaObject(GateRef obj);
inline GateRef IsJSObject(GateRef obj);
inline GateRef TaggedObjectIsString(GateRef obj);

View File

@ -692,6 +692,67 @@ void NewJSObjectStubBuilder::GenerateCircuit()
Return(newBuilder.NewJSObject(glue, hclass));
}
// Generates the common-stub circuit for an internal call made through a bound
// function. The callee's Method is inspected once and the call is routed to
// one of four trampolines, depending on (a) whether the method has AOT code
// compiled with the fast-call convention and (b) whether the caller already
// supplies at least as many arguments as the method declares.
void JsBoundCallInternalStubBuilder::GenerateCircuit()
{
auto env = GetEnvironment();
Label exit(env);
Label fastCall(env);
Label notFastCall(env);
Label methodIsFastCall(env);
Label fastCallBridge(env);
Label slowCall(env);
Label slowCallBridge(env);
// Stub arguments: (glue, argc, callTarget, argv, thisValue, newTarget).
GateRef glue = PtrArgument(0);
GateRef argc = Int64Argument(1);
GateRef func = TaggedPointerArgument(2); // callTarget
GateRef argv = PtrArgument(3);
GateRef thisValue = TaggedPointerArgument(4); // this
GateRef newTarget = TaggedPointerArgument(5); // new target
DEFVARIABLE(result, VariableType::JS_ANY(), Undefined());
GateRef method = GetMethodFromFunction(func);
// Decode the declared argument count from the NumArgsBits field of the
// method's call field.
GateRef callfield = Load(VariableType::INT64(), method, IntPtr(Method::CALL_FIELD_OFFSET));
GateRef expectedNum = Int64And(Int64LSR(callfield, Int64(MethodLiteral::NumArgsBits::START_BIT)),
Int64((1LU << MethodLiteral::NumArgsBits::SIZE) - 1));
// expectedArgc and the incoming argc both count the NUM_MANDATORY_JSFUNC_ARGS
// slots (func/new-target/this); actualArgc passed to the trampolines excludes
// them.
GateRef expectedArgc = Int64Add(expectedNum, Int64(NUM_MANDATORY_JSFUNC_ARGS));
GateRef actualArgc = Int64Sub(argc, IntPtr(NUM_MANDATORY_JSFUNC_ARGS));
// Fast-call path is only valid when the method has AOT code AND uses the
// fast-call convention.
Branch(HasAotCodeAndFastCall(method), &methodIsFastCall, &notFastCall);
Bind(&methodIsFastCall);
{
// Enough arguments supplied: call the fast-call trampoline directly.
Branch(Int64LessThanOrEqual(expectedArgc, argc), &fastCall, &fastCallBridge);
Bind(&fastCall);
{
result = CallNGCRuntime(glue, RTSTUB_ID(JSFastCallWithArgV),
{ glue, func, thisValue, actualArgc, argv });
Jump(&exit);
}
Bind(&fastCallBridge);
{
// Too few arguments: bridge stub pads the missing expected slots with
// undefined, so expectedNum is passed along.
result = CallNGCRuntime(glue, RTSTUB_ID(JSFastCallWithArgVAndPushUndefined),
{ glue, func, thisValue, actualArgc, argv, expectedNum });
Jump(&exit);
}
}
Bind(&notFastCall);
{
Branch(Int64LessThanOrEqual(expectedArgc, argc), &slowCall, &slowCallBridge);
Bind(&slowCall);
{
result = CallNGCRuntime(glue, RTSTUB_ID(JSCallWithArgV),
{ glue, actualArgc, func, newTarget, thisValue, argv });
Jump(&exit);
}
Bind(&slowCallBridge);
{
// NOTE(review): unlike the fast-call bridge, expectedNum is not passed here;
// presumably JSCallWithArgVAndPushUndefined re-reads the expected count from
// the callee's method — confirm against the trampoline implementation.
result = CallNGCRuntime(glue, RTSTUB_ID(JSCallWithArgVAndPushUndefined),
{ glue, actualArgc, func, newTarget, thisValue, argv });
Jump(&exit);
}
}
Bind(&exit);
Return(*result);
}
void JsProxyCallInternalStubBuilder::GenerateCircuit()
{
auto env = GetEnvironment();
@ -705,6 +766,8 @@ void JsProxyCallInternalStubBuilder::GenerateCircuit()
GateRef argc = Int64Argument(1);
GateRef proxy = TaggedPointerArgument(2); // callTarget
GateRef argv = PtrArgument(3);
GateRef newTarget = Load(VariableType::JS_POINTER(), argv, IntPtr(sizeof(JSTaggedValue)));
GateRef thisTarget = Load(VariableType::JS_POINTER(), argv, IntPtr(2 * sizeof(JSTaggedValue)));
DEFVARIABLE(result, VariableType::JS_ANY(), Undefined());
@ -728,19 +791,98 @@ void JsProxyCallInternalStubBuilder::GenerateCircuit()
Branch(TaggedIsUndefined(method), &isUndefined, &isNotUndefined);
Bind(&isUndefined);
{
result = CallNGCRuntime(glue, RTSTUB_ID(JSProxyCallInternalWithArgV), {glue, target});
Return(*result);
Label isHeapObject(env);
Label slowPath(env);
Label isJsFcuntion(env);
Label notCallConstructor(env);
Label fastCall(env);
Label notFastCall(env);
Label slowCall(env);
Branch(TaggedIsHeapObject(target), &isHeapObject, &slowPath);
Bind(&isHeapObject);
{
Branch(IsJSFunction(target), &isJsFcuntion, &slowPath);
Bind(&isJsFcuntion);
{
Branch(IsClassConstructor(target), &slowPath, &notCallConstructor);
Bind(&notCallConstructor);
GateRef meth = GetMethodFromFunction(target);
GateRef actualArgc = Int64Sub(argc, IntPtr(NUM_MANDATORY_JSFUNC_ARGS));
GateRef actualArgv = PtrAdd(argv, IntPtr(NUM_MANDATORY_JSFUNC_ARGS * sizeof(JSTaggedValue)));
Branch(HasAotCodeAndFastCall(meth), &fastCall, &notFastCall);
Bind(&fastCall);
{
result = CallNGCRuntime(glue, RTSTUB_ID(JSFastCallWithArgV),
{ glue, target, thisTarget, actualArgc, actualArgv });
Jump(&exit);
}
Bind(&notFastCall);
{
Branch(HasAotCode(meth), &slowCall, &slowPath);
Bind(&slowCall);
{
result = CallNGCRuntime(glue, RTSTUB_ID(JSCallWithArgV),
{ glue, actualArgc, target, newTarget, thisTarget, actualArgv });
Jump(&exit);
}
}
}
}
Bind(&slowPath);
{
result = CallNGCRuntime(glue, RTSTUB_ID(JSProxyCallInternalWithArgV), {glue, target});
Jump(&exit);
}
}
Bind(&isNotUndefined);
{
Label isHeapObject1(env);
Label slowPath1(env);
Label isJsFcuntion1(env);
Label notCallConstructor1(env);
Label fastCall1(env);
Label notFastCall1(env);
Label slowCall1(env);
const int JSPROXY_NUM_ARGS = 3;
GateRef arrHandle = CallRuntime(glue, RTSTUB_ID(CreateArrayFromList), argc, argv);
// 2: this offset
GateRef thisArg = Load(VariableType::JS_POINTER(), argv, IntPtr(2 * sizeof(JSTaggedValue)));
GateRef numArgs = Int64(JSPROXY_NUM_ARGS + NUM_MANDATORY_JSFUNC_ARGS);
result = CallNGCRuntime(glue, RTSTUB_ID(JSCall),
{glue, numArgs, method, Undefined(), handler, target, thisArg, arrHandle});
Jump(&exit);
Branch(TaggedIsHeapObject(method), &isHeapObject1, &slowPath1);
Bind(&isHeapObject1);
{
Branch(IsJSFunction(method), &isJsFcuntion1, &slowPath1);
Bind(&isJsFcuntion1);
{
Branch(IsClassConstructor(method), &slowPath1, &notCallConstructor1);
Bind(&notCallConstructor1);
GateRef meth = GetMethodFromFunction(method);
GateRef code = GetAotCodeAddr(meth);
Branch(HasAotCodeAndFastCall(meth), &fastCall1, &notFastCall1);
Bind(&fastCall1);
{
result = FastCallOptimized(glue, code,
{ glue, method, handler, target, thisTarget, arrHandle });
Jump(&exit);
}
Bind(&notFastCall1);
{
Branch(HasAotCode(meth), &slowCall1, &slowPath1);
Bind(&slowCall1);
{
result = CallOptimized(glue, code,
{ glue, numArgs, method, Undefined(), handler, target, thisTarget, arrHandle });
Jump(&exit);
}
}
}
}
Bind(&slowPath1);
{
result = CallNGCRuntime(glue, RTSTUB_ID(JSCall),
{ glue, numArgs, method, Undefined(), handler, target, thisTarget, arrHandle });
Jump(&exit);
}
}
}
Bind(&exit);

View File

@ -76,6 +76,7 @@ namespace panda::ecmascript::kungfu {
V(CreateEmptyArray) \
V(CreateArrayWithBuffer) \
V(NewJSObject) \
V(JsBoundCallInternal) \
V(JsProxyCallInternal)
#define COMMON_STUB_ID_LIST(V) \

View File

@ -90,7 +90,7 @@ void Module::CollectFuncEntryInfo(std::map<uintptr_t, std::string> &addr2name, S
funcSize = codeBuff + assembler_->GetSectionSize(ElfSecName::TEXT) - entrys[j];
}
kungfu::CalleeRegAndOffsetVec info = assembler_->GetCalleeReg2Offset(func, log);
stubInfo.AddEntry(cs->GetTargetKind(), false, cs->GetID(), entrys[j] - codeBuff, moduleIndex, delta,
stubInfo.AddEntry(cs->GetTargetKind(), false, false, cs->GetID(), entrys[j] - codeBuff, moduleIndex, delta,
funcSize, info);
ASSERT(!cs->GetName().empty());
addr2name[entrys[j]] = cs->GetName();
@ -101,10 +101,10 @@ void Module::CollectFuncEntryInfo(std::map<uintptr_t, std::string> &addr2name, A
uint32_t moduleIndex, const CompilerLog &log)
{
auto engine = assembler_->GetEngine();
std::vector<std::tuple<uint64_t, size_t, int>> funcInfo; // entry idx delta
std::vector<std::tuple<uint64_t, size_t, int, bool>> funcInfo; // entry idx delta
std::vector<kungfu::CalleeRegAndOffsetVec> calleeSaveRegisters; // entry idx delta
// 1.Compile all functions and collect function infos
llvmModule_->IteratefuncIndexMap([&](size_t idx, LLVMValueRef func) {
llvmModule_->IteratefuncIndexMap([&](size_t idx, LLVMValueRef func, bool isFastCall) {
uint64_t funcEntry = reinterpret_cast<uintptr_t>(LLVMGetPointerToGlobal(engine, func));
uint64_t length = 0;
std::string funcName(LLVMGetValueName2(func, reinterpret_cast<size_t *>(&length)));
@ -112,7 +112,7 @@ void Module::CollectFuncEntryInfo(std::map<uintptr_t, std::string> &addr2name, A
addr2name[funcEntry] = funcName;
int delta = assembler_->GetFpDeltaPrevFramSp(func, log);
ASSERT(delta >= 0 && (delta % sizeof(uintptr_t) == 0));
funcInfo.emplace_back(std::tuple(funcEntry, idx, delta));
funcInfo.emplace_back(std::tuple(funcEntry, idx, delta, isFastCall));
kungfu::CalleeRegAndOffsetVec info = assembler_->GetCalleeReg2Offset(func, log);
calleeSaveRegisters.emplace_back(info);
});
@ -131,8 +131,9 @@ void Module::CollectFuncEntryInfo(std::map<uintptr_t, std::string> &addr2name, A
uint64_t funcEntry;
size_t idx;
int delta;
bool isFastCall;
uint32_t funcSize;
std::tie(funcEntry, idx, delta) = funcInfo[i];
std::tie(funcEntry, idx, delta, isFastCall) = funcInfo[i];
if (i < funcCount - 1) {
funcSize = std::get<0>(funcInfo[i + 1]) - funcEntry;
} else {
@ -141,7 +142,7 @@ void Module::CollectFuncEntryInfo(std::map<uintptr_t, std::string> &addr2name, A
auto found = addr2name[funcEntry].find(panda::ecmascript::JSPandaFile::ENTRY_FUNCTION_NAME);
bool isMainFunc = found != std::string::npos;
uint64_t offset = funcEntry - textAddr + aotInfo.GetCurTextSecOffset();
aotInfo.AddEntry(CallSignature::TargetKind::JSFUNCTION, isMainFunc, idx,
aotInfo.AddEntry(CallSignature::TargetKind::JSFUNCTION, isMainFunc, isFastCall, idx,
offset, moduleIndex, delta, funcSize, calleeSaveRegisters[i]);
}
aotInfo.UpdateCurTextSecOffset(textSize);
@ -225,7 +226,7 @@ void StubFileGenerator::CollectAsmStubCodeInfo(std::map<uintptr_t, std::string>
} else {
funSize = asmModule_.GetBufferSize() - entryOffset;
}
stubInfo_.AddEntry(cs->GetTargetKind(), false, cs->GetID(), entryOffset, bridgeModuleIdx, 0, funSize);
stubInfo_.AddEntry(cs->GetTargetKind(), false, false, cs->GetID(), entryOffset, bridgeModuleIdx, 0, funSize);
ASSERT(!cs->GetName().empty());
addr2name[entryOffset] = cs->GetName();
}

View File

@ -169,7 +169,9 @@ GateType GateAccessor::GetParamGateType(GateRef gate) const
GetOpCode(gate) == OpCode::TYPED_ARRAY_CHECK ||
GetOpCode(gate) == OpCode::INDEX_CHECK ||
GetOpCode(gate) == OpCode::JSCALLTARGET_TYPE_CHECK ||
GetOpCode(gate) == OpCode::JSCALLTHISTARGET_TYPE_CHECK);
GetOpCode(gate) == OpCode::JSCALLTHISTARGET_TYPE_CHECK ||
GetOpCode(gate) == OpCode::JSFASTCALLTARGET_TYPE_CHECK ||
GetOpCode(gate) == OpCode::JSFASTCALLTHISTARGET_TYPE_CHECK);
Gate *gatePtr = circuit_->LoadGatePtr(gate);
GateTypeAccessor accessor(gatePtr->GetOneParameterMetaData()->GetValue());
return accessor.GetGateType();
@ -250,7 +252,8 @@ uint32_t GateAccessor::TryGetPcOffset(GateRef gate) const
return gatePtr->GetJSBytecodeMetaData()->GetPcOffset();
case OpCode::TYPED_CALL_BUILTIN:
case OpCode::CONSTRUCT:
case OpCode::TYPEDAOTCALL:
case OpCode::TYPEDCALL:
case OpCode::TYPEDFASTCALL:
case OpCode::CALL_GETTER:
case OpCode::CALL_SETTER:
return static_cast<uint32_t>(gatePtr->GetOneParameterMetaData()->GetValue());

View File

@ -100,6 +100,7 @@ enum class DeoptType : uint8_t {
NEGTIVEINDEX,
LARGEINDEX,
INLINEFAIL,
NOTJSFASTCALLTGT,
};
enum class ICmpCondition : uint8_t {
@ -241,7 +242,8 @@ std::string MachineTypeToStr(MachineType machineType);
V(RuntimeCall, RUNTIME_CALL, GateFlags::NONE_FLAG, 0, 1, value) \
V(RuntimeCallWithArgv, RUNTIME_CALL_WITH_ARGV, GateFlags::NONE_FLAG, 0, 1, value) \
V(NoGcRuntimeCall, NOGC_RUNTIME_CALL, GateFlags::NONE_FLAG, 0, 1, value) \
V(AotCall, AOT_CALL, GateFlags::NONE_FLAG, 0, 1, value) \
V(CallOptimized, CALL_OPTIMIZED, GateFlags::NONE_FLAG, 0, 1, value) \
V(FastCallOptimized, FAST_CALL_OPTIMIZED, GateFlags::NONE_FLAG, 0, 1, value) \
V(Call, CALL, GateFlags::NONE_FLAG, 0, 1, value) \
V(BytecodeCall, BYTECODE_CALL, GateFlags::NONE_FLAG, 0, 1, value) \
V(DebuggerBytecodeCall, DEBUGGER_BYTECODE_CALL, GateFlags::NONE_FLAG, 0, 1, value) \
@ -252,7 +254,8 @@ std::string MachineTypeToStr(MachineType machineType);
#define GATE_META_DATA_LIST_WITH_PC_OFFSET(V) \
V(TypedCallBuiltin, TYPED_CALL_BUILTIN, GateFlags::NO_WRITE, 1, 1, value) \
V(Construct, CONSTRUCT, GateFlags::NONE_FLAG, 1, 1, value) \
V(TypedAotCall, TYPEDAOTCALL, GateFlags::NONE_FLAG, 1, 1, value)
V(TypedCall, TYPEDCALL, GateFlags::NONE_FLAG, 1, 1, value) \
V(TypedFastCall, TYPEDFASTCALL, GateFlags::NONE_FLAG, 1, 1, value)
#define GATE_META_DATA_LIST_WITH_PC_OFFSET_FIXED_VALUE(V) \
V(CallGetter, CALL_GETTER, GateFlags::NONE_FLAG, 1, 1, 2) \
@ -263,18 +266,20 @@ std::string MachineTypeToStr(MachineType machineType);
V(DependSelector, DEPEND_SELECTOR, GateFlags::FIXED, 1, value, 0) \
GATE_META_DATA_LIST_WITH_VALUE_IN(V)
#define GATE_META_DATA_LIST_WITH_GATE_TYPE(V) \
V(PrimitiveTypeCheck, PRIMITIVE_TYPE_CHECK, GateFlags::CHECKABLE, 1, 1, 1) \
V(ObjectTypeCheck, OBJECT_TYPE_CHECK, GateFlags::CHECKABLE, 1, 1, 2) \
V(JSCallTargetTypeCheck, JSCALLTARGET_TYPE_CHECK, GateFlags::CHECKABLE, 1, 1, 2) \
V(JSCallThisTargetTypeCheck, JSCALLTHISTARGET_TYPE_CHECK, GateFlags::CHECKABLE, 1, 1, 1) \
V(TypedArrayCheck, TYPED_ARRAY_CHECK, GateFlags::CHECKABLE, 1, 1, 1) \
V(IndexCheck, INDEX_CHECK, GateFlags::CHECKABLE, 1, 1, 2) \
V(TypedUnaryOp, TYPED_UNARY_OP, GateFlags::NO_WRITE, 1, 1, 1) \
V(TypedConditionJump, TYPED_CONDITION_JUMP, GateFlags::NO_WRITE, 1, 1, 1) \
V(TypedConvert, TYPE_CONVERT, GateFlags::NO_WRITE, 1, 1, 1) \
V(CheckAndConvert, CHECK_AND_CONVERT, GateFlags::NO_WRITE, 1, 1, 1) \
V(Convert, CONVERT, GateFlags::NONE_FLAG, 0, 0, 1) \
#define GATE_META_DATA_LIST_WITH_GATE_TYPE(V) \
V(PrimitiveTypeCheck, PRIMITIVE_TYPE_CHECK, GateFlags::CHECKABLE, 1, 1, 1) \
V(ObjectTypeCheck, OBJECT_TYPE_CHECK, GateFlags::CHECKABLE, 1, 1, 2) \
V(JSCallTargetTypeCheck, JSCALLTARGET_TYPE_CHECK, GateFlags::CHECKABLE, 1, 1, 2) \
V(JSFastCallTargetTypeCheck, JSFASTCALLTARGET_TYPE_CHECK, GateFlags::CHECKABLE, 1, 1, 2) \
V(JSCallThisTargetTypeCheck, JSCALLTHISTARGET_TYPE_CHECK, GateFlags::CHECKABLE, 1, 1, 1) \
V(JSFastCallThisTargetTypeCheck, JSFASTCALLTHISTARGET_TYPE_CHECK, GateFlags::CHECKABLE, 1, 1, 1)\
V(TypedArrayCheck, TYPED_ARRAY_CHECK, GateFlags::CHECKABLE, 1, 1, 1) \
V(IndexCheck, INDEX_CHECK, GateFlags::CHECKABLE, 1, 1, 2) \
V(TypedUnaryOp, TYPED_UNARY_OP, GateFlags::NO_WRITE, 1, 1, 1) \
V(TypedConditionJump, TYPED_CONDITION_JUMP, GateFlags::NO_WRITE, 1, 1, 1) \
V(TypedConvert, TYPE_CONVERT, GateFlags::NO_WRITE, 1, 1, 1) \
V(CheckAndConvert, CHECK_AND_CONVERT, GateFlags::NO_WRITE, 1, 1, 1) \
V(Convert, CONVERT, GateFlags::NONE_FLAG, 0, 0, 1) \
V(JSInlineTargetTypeCheck, JSINLINETARGET_TYPE_CHECK, GateFlags::CHECKABLE, 1, 1, 2)
#define GATE_META_DATA_LIST_WITH_VALUE(V) \

View File

@ -206,7 +206,7 @@ void LLVMIRGeneratorImpl::GenerateCodeForStub(Circuit *circuit, const ControlFlo
{
LLVMValueRef function = module_->GetFunction(index);
const CallSignature* cs = module_->GetCSign(index);
LLVMIRBuilder builder(&graph, circuit, module_, function, cfg, cs->GetCallConv(), enableLog_, cs->GetName());
LLVMIRBuilder builder(&graph, circuit, module_, function, cfg, cs->GetCallConv(), enableLog_, false, cs->GetName());
builder.Build();
}
@ -216,8 +216,14 @@ void LLVMIRGeneratorImpl::GenerateCode(Circuit *circuit, const ControlFlowGraph
{
auto function = module_->AddFunc(methodLiteral, jsPandaFile);
circuit->SetFrameType(FrameType::OPTIMIZED_JS_FUNCTION_FRAME);
LLVMIRBuilder builder(&graph, circuit, module_, function, cfg, CallSignature::CallConv::WebKitJSCallConv,
enableLog_, methodName);
CallSignature::CallConv conv;
if (methodLiteral->IsFastCall()) {
conv = CallSignature::CallConv::CCallConv;
} else {
conv = CallSignature::CallConv::WebKitJSCallConv;
}
LLVMIRBuilder builder(&graph, circuit, module_, function, cfg, conv,
enableLog_, methodLiteral->IsFastCall(), methodName);
builder.Build();
}

View File

@ -57,9 +57,11 @@
namespace panda::ecmascript::kungfu {
LLVMIRBuilder::LLVMIRBuilder(const std::vector<std::vector<GateRef>> *schedule, Circuit *circuit,
LLVMModule *module, LLVMValueRef function, const CompilationConfig *cfg,
CallSignature::CallConv callConv, bool enableLog, const std::string &funcName)
CallSignature::CallConv callConv, bool enableLog, bool isFastCallAot,
const std::string &funcName)
: compCfg_(cfg), scheduledGates_(schedule), circuit_(circuit), acc_(circuit), module_(module->GetModule()),
function_(function), llvmModule_(module), callConv_(callConv), enableLog_(enableLog)
function_(function), llvmModule_(module), callConv_(callConv), enableLog_(enableLog),
isFastCallAot_(isFastCallAot)
{
context_ = module->GetContext();
builder_ = LLVMCreateBuilderInContext(context_);
@ -162,7 +164,8 @@ void LLVMIRBuilder::InitializeHandlers()
{OpCode::RUNTIME_CALL, &LLVMIRBuilder::HandleRuntimeCall},
{OpCode::RUNTIME_CALL_WITH_ARGV, &LLVMIRBuilder::HandleRuntimeCallWithArgv},
{OpCode::NOGC_RUNTIME_CALL, &LLVMIRBuilder::HandleCall},
{OpCode::AOT_CALL, &LLVMIRBuilder::HandleCall},
{OpCode::CALL_OPTIMIZED, &LLVMIRBuilder::HandleCall},
{OpCode::FAST_CALL_OPTIMIZED, &LLVMIRBuilder::HandleCall},
{OpCode::CALL, &LLVMIRBuilder::HandleCall},
{OpCode::BYTECODE_CALL, &LLVMIRBuilder::HandleBytecodeCall},
{OpCode::DEBUGGER_BYTECODE_CALL, &LLVMIRBuilder::HandleBytecodeCall},
@ -371,7 +374,14 @@ void LLVMIRBuilder::GenPrologue()
for (auto useIt = uses.begin(); useIt != uses.end(); ++useIt) {
int argth = static_cast<int>(acc_.TryGetValue(*useIt));
LLVMValueRef value = LLVMGetParam(function_, argth);
if (argth == static_cast<int>(CommonArgIdx::FUNC)) {
int funcIndex = 0;
if (isFastCallAot_) {
frameType = FrameType::OPTIMIZED_JS_FAST_CALL_FUNCTION_FRAME;
funcIndex = static_cast<int>(FastCallArgIdx::FUNC);
} else {
funcIndex = static_cast<int>(CommonArgIdx::FUNC);
}
if (argth == funcIndex) {
SaveJSFuncOnOptJSFuncFrame(value);
SaveFrameTypeOnFrame(frameType, builder_);
}
@ -492,7 +502,8 @@ void LLVMIRBuilder::HandleCall(GateRef gate)
acc_.GetIns(gate, ins);
OpCode callOp = acc_.GetOpCode(gate);
if (callOp == OpCode::CALL || callOp == OpCode::NOGC_RUNTIME_CALL ||
callOp == OpCode::BUILTINS_CALL || callOp == OpCode::BUILTINS_CALL_WITH_ARGV || callOp == OpCode::AOT_CALL) {
callOp == OpCode::BUILTINS_CALL || callOp == OpCode::BUILTINS_CALL_WITH_ARGV ||
callOp == OpCode::CALL_OPTIMIZED || callOp == OpCode::FAST_CALL_OPTIMIZED) {
VisitCall(gate, ins, callOp);
} else {
LOG_ECMA(FATAL) << "this branch is unreachable";
@ -830,10 +841,22 @@ void LLVMIRBuilder::VisitCall(GateRef gate, const std::vector<GateRef> &inList,
rtbaseoffset = LLVMBuildAdd(builder_, glue, rtoffset, "");
callee = GetFunction(glue, calleeDescriptor, rtbaseoffset);
kind = GetCallExceptionKind(index, op);
} else if (op == OpCode::AOT_CALL) {
calleeDescriptor = RuntimeStubCSigns::GetAotCallSign();
} else if (op == OpCode::CALL_OPTIMIZED) {
calleeDescriptor = RuntimeStubCSigns::GetOptimizedCallSign();
callee = GetCallee(inList, calleeDescriptor);
kind = CallExceptionKind::HAS_PC_OFFSET;
if (IsOptimizedJSFunction()) {
kind = CallExceptionKind::HAS_PC_OFFSET;
} else {
kind = CallExceptionKind::NO_PC_OFFSET;
}
} else if (op == OpCode::FAST_CALL_OPTIMIZED) {
calleeDescriptor = RuntimeStubCSigns::GetOptimizedFastCallSign();
callee = GetCallee(inList, calleeDescriptor);
if (IsOptimizedJSFunction()) {
kind = CallExceptionKind::HAS_PC_OFFSET;
} else {
kind = CallExceptionKind::NO_PC_OFFSET;
}
} else {
ASSERT(op == OpCode::BUILTINS_CALL || op == OpCode::BUILTINS_CALL_WITH_ARGV);
LLVMValueRef opcodeOffset = gate2LValue_[inList[targetIndex]];
@ -2215,7 +2238,7 @@ void LLVMIRBuilder::GenDeoptEntry(LLVMModuleRef &module)
auto funcType = LLVMFunctionType(LLVMInt64TypeInContext(context_), paramTys.data(), paramTys.size(), 0);
auto function = LLVMAddFunction(module, Deoptimizier::GetLLVMDeoptRelocateSymbol(), funcType);
LLVMSetFunctionCallConv(function, LLVMCCallConv);
llvmModule_->SetFunction(LLVMModule::kDeoptEntryOffset, function);
llvmModule_->SetFunction(LLVMModule::kDeoptEntryOffset, function, false);
LLVMBasicBlockRef entry = LLVMAppendBasicBlockInContext(context_, function, "entry");
LLVMBuilderRef builder = LLVMCreateBuilderInContext(context_);
@ -2294,7 +2317,6 @@ LLVMValueRef LLVMIRBuilder::ConvertToTagged(GateRef gate)
case MachineType::F64:
return ConvertFloat64ToTaggedDouble(gate);
case MachineType::I64:
ASSERT(!acc_.GetGateType(gate).IsNJSValueType());
break;
default:
LOG_COMPILER(FATAL) << "unexpected machineType!";
@ -2351,6 +2373,7 @@ void LLVMIRBuilder::VisitDeoptCheck(GateRef gate)
GateRef jsFunc = argAcc.GetFrameArgsIn(frameState, FrameArgIdx::FUNC);
GateRef newTarget = argAcc.GetFrameArgsIn(frameState, FrameArgIdx::NEW_TARGET);
GateRef thisObj = argAcc.GetFrameArgsIn(frameState, FrameArgIdx::THIS_OBJECT);
GateRef actualArgc = argAcc.GetFrameArgsIn(frameState, FrameArgIdx::ACTUAL_ARGC);
// vreg
for (size_t i = 0; i < envIndex; i++) {
GateRef vregValue = acc_.GetValueIn(frameState, i);
@ -2383,6 +2406,8 @@ void LLVMIRBuilder::VisitDeoptCheck(GateRef gate)
// this object
int32_t specThisIndex = static_cast<int32_t>(SpecVregIndex::THIS_OBJECT_INDEX);
SaveDeoptVregInfo(values, specThisIndex, curDepth, shift, thisObj);
int32_t specArgcIndex = static_cast<int32_t>(SpecVregIndex::ACTUAL_ARGC_INDEX);
SaveDeoptVregInfo(values, specArgcIndex, curDepth, shift, actualArgc);
}
LLVMValueRef runtimeCall =
LLVMBuildCall3(builder_, funcType, callee, params.data(), params.size(), "", values.data(), values.size());
@ -2432,7 +2457,7 @@ void LLVMModule::InitialLLVMFuncTypeAndFuncByModuleCSigns()
const CallSignature* cs = callSigns_[i];
ASSERT(!cs->GetName().empty());
LLVMValueRef value = AddAndGetFunc(cs);
SetFunction(i, value);
SetFunction(i, value, false);
}
}
@ -2534,20 +2559,30 @@ LLVMValueRef LLVMModule::AddFunc(const panda::ecmascript::MethodLiteral *methodL
{
LLVMTypeRef returnType = NewLType(MachineType::I64, GateType::TaggedValue()); // possibly get it for circuit
LLVMTypeRef glue = NewLType(MachineType::I64, GateType::NJSValue());
LLVMTypeRef actualArgc = NewLType(MachineType::I64, GateType::NJSValue());
std::vector<LLVMTypeRef> paramTys = { glue, actualArgc };
auto funcIndex = static_cast<uint32_t>(CommonArgIdx::FUNC);
auto numOfComArgs = static_cast<uint32_t>(CommonArgIdx::NUM_OF_ARGS);
auto paramCount = methodLiteral->GetNumArgs() + numOfComArgs;
auto numOfRestArgs = paramCount - funcIndex;
paramTys.insert(paramTys.end(), numOfRestArgs, NewLType(MachineType::I64, GateType::TaggedValue()));
uint32_t paramCount = 0;
std::vector<LLVMTypeRef> paramTys = { glue };
if (!methodLiteral->IsFastCall()) {
LLVMTypeRef actualArgc = NewLType(MachineType::I64, GateType::NJSValue());
paramTys.emplace_back(actualArgc);
auto funcIndex = static_cast<uint32_t>(CommonArgIdx::FUNC);
auto numOfComArgs = static_cast<uint32_t>(CommonArgIdx::NUM_OF_ARGS);
paramCount = methodLiteral->GetNumArgs() + numOfComArgs;
auto numOfRestArgs = paramCount - funcIndex;
paramTys.insert(paramTys.end(), numOfRestArgs, NewLType(MachineType::I64, GateType::TaggedValue()));
} else {
auto funcIndex = static_cast<uint32_t>(FastCallArgIdx::FUNC);
auto numOfComArgs = static_cast<uint32_t>(FastCallArgIdx::NUM_OF_ARGS);
paramCount = methodLiteral->GetNumArgs() + numOfComArgs;
auto numOfRestArgs = paramCount - funcIndex;
paramTys.insert(paramTys.end(), numOfRestArgs, NewLType(MachineType::I64, GateType::TaggedValue()));
}
auto funcType = LLVMFunctionType(returnType, paramTys.data(), paramCount, false); // not variable args
std::string name = GetFuncName(methodLiteral, jsPandaFile);
auto offsetInPandaFile = methodLiteral->GetMethodId().GetOffset();
auto function = LLVMAddFunction(module_, name.c_str(), funcType);
ASSERT(offsetInPandaFile != LLVMModule::kDeoptEntryOffset);
SetFunction(offsetInPandaFile, function);
SetFunction(offsetInPandaFile, function, methodLiteral->IsFastCall());
return function;
}

View File

@ -127,17 +127,17 @@ public:
LLVMTypeRef GenerateFuncType(const std::vector<LLVMValueRef> &params, const CallSignature *stubDescriptor);
void SetFunction(size_t index, LLVMValueRef func)
void SetFunction(size_t index, LLVMValueRef func, bool isFastCall)
{
funcIndexMap_.emplace_back(std::make_pair(index, func));
funcIndexMap_.emplace_back(std::make_tuple(index, func, isFastCall));
}
LLVMValueRef GetFunction(size_t index)
{
// next optimization can be performed
for (auto &it: funcIndexMap_) {
if (it.first == index) {
return it.second;
if (std::get<0>(it) == index) {
return std::get<1>(it);
}
}
return nullptr;
@ -162,7 +162,8 @@ public:
void IteratefuncIndexMap(const Callback &cb) const
{
for (auto record : funcIndexMap_) {
cb(record.first, record.second);
// 2: 3rd param
cb(std::get<0>(record), std::get<1>(record), std::get<2>(record));
}
}
@ -220,7 +221,7 @@ private:
// index:
// stub scenario - sequence of function adding to llvmModule
// aot scenario - method Id of function generated by panda files
std::vector<std::pair<size_t, LLVMValueRef>> funcIndexMap_;
std::vector<std::tuple<size_t, LLVMValueRef, bool>> funcIndexMap_;
std::vector<const CallSignature *> callSigns_;
LLVMModuleRef module_ {nullptr};
LLVMContextRef context_ {nullptr};
@ -297,7 +298,7 @@ class LLVMIRBuilder {
public:
LLVMIRBuilder(const std::vector<std::vector<GateRef>> *schedule, Circuit *circuit,
LLVMModule *module, LLVMValueRef function, const CompilationConfig *cfg,
CallSignature::CallConv callConv, bool enableLog, const std::string &funcName);
CallSignature::CallConv callConv, bool enableLog, bool isFastCallAot, const std::string &funcName);
~LLVMIRBuilder();
void Build();
@ -434,6 +435,7 @@ private:
LLVMTypeRef slotType_ {nullptr};
CallSignature::CallConv callConv_ = CallSignature::CallConv::CCallConv;
bool enableLog_ {false};
bool isFastCallAot_ {false};
LLVMMetadataRef dFuncMD_ {nullptr};
};
} // namespace panda::ecmascript::kungfu

View File

@ -84,7 +84,8 @@ GateRef NumberSpeculativeRetype::VisitGate(GateRef gate)
case OpCode::CALL_SETTER:
case OpCode::LOAD_PROPERTY:
case OpCode::CONSTRUCT:
case OpCode::TYPEDAOTCALL:
case OpCode::TYPEDCALL:
case OpCode::TYPEDFASTCALL:
case OpCode::OBJECT_TYPE_CHECK:
return VisitWithConstantValue(gate, 1); // ignoreIndex
case OpCode::LOOP_EXIT_VALUE:
@ -94,7 +95,9 @@ GateRef NumberSpeculativeRetype::VisitGate(GateRef gate)
case OpCode::STABLE_ARRAY_CHECK:
case OpCode::TYPED_ARRAY_CHECK:
case OpCode::JSCALLTARGET_TYPE_CHECK:
case OpCode::JSFASTCALLTARGET_TYPE_CHECK:
case OpCode::JSCALLTHISTARGET_TYPE_CHECK:
case OpCode::JSFASTCALLTHISTARGET_TYPE_CHECK:
case OpCode::TYPED_CALL_CHECK:
case OpCode::HEAP_ALLOC:
case OpCode::TYPED_NEW_ALLOCATE_THIS:

View File

@ -21,7 +21,8 @@
namespace panda::ecmascript::kungfu {
CallSignature RuntimeStubCSigns::callSigns_[RuntimeStubCSigns::NUM_OF_RTSTUBS_WITHOUT_GC];
CallSignature RuntimeStubCSigns::aotCallSign_;
CallSignature RuntimeStubCSigns::optimizedCallSign_;
CallSignature RuntimeStubCSigns::optimizedFastCallSign_;
void RuntimeStubCSigns::Initialize()
{
@ -47,7 +48,8 @@ void RuntimeStubCSigns::Initialize()
RUNTIME_ASM_STUB_LIST(INIT_ASM_SIGNATURES)
#undef INIT_ASM_SIGNATURES
JSAotCallCallSignature::Initialize(&aotCallSign_);
JSOptimizedCallCallSignature::Initialize(&optimizedCallSign_);
JSOptimizedFastCallCallSignature::Initialize(&optimizedFastCallSign_);
}
void RuntimeStubCSigns::GetASMCSigns(std::vector<const CallSignature*>& outputCallSigns)

View File

@ -72,14 +72,20 @@ RUNTIME_STUB_LIST(DEF_STUB_NAME)
return "unknown";
}
static const CallSignature* GetAotCallSign()
static const CallSignature* GetOptimizedCallSign()
{
return &aotCallSign_;
return &optimizedCallSign_;
}
static const CallSignature* GetOptimizedFastCallSign()
{
return &optimizedFastCallSign_;
}
private:
static CallSignature callSigns_[NUM_OF_RTSTUBS_WITHOUT_GC];
static CallSignature aotCallSign_;
static CallSignature optimizedCallSign_;
static CallSignature optimizedFastCallSign_;
};
static_assert(static_cast<int>(kungfu::RuntimeStubCSigns::ID_CallRuntime) ==
static_cast<int>(kungfu::RuntimeStubCSigns::ASM_STUB_ID_CallRuntime));

View File

@ -60,8 +60,11 @@ void SlowPathLowering::CallRuntimeLowering()
case OpCode::CONSTRUCT:
LowerConstruct(gate);
break;
case OpCode::TYPEDAOTCALL:
LowerTypedAotCall(gate);
case OpCode::TYPEDCALL:
LowerTypedCall(gate);
break;
case OpCode::TYPEDFASTCALL:
LowerTypedFastCall(gate);
break;
case OpCode::UPDATE_HOTNESS:
LowerUpdateHotness(gate);
@ -113,15 +116,18 @@ void SlowPathLowering::DeleteLoopExitValue(GateRef gate)
acc_.ReplaceGate(gate, state, Circuit::NullGate(), value);
}
void SlowPathLowering::LowerToJSCall(GateRef hirGate, const std::vector<GateRef> &args)
void SlowPathLowering::LowerToJSCall(GateRef hirGate, const std::vector<GateRef> &args,
const std::vector<GateRef> &argsFastCall)
{
Label exit(&builder_);
DEFVAlUE(res, (&builder_), VariableType::JS_ANY(), builder_.Undefined());
GateRef func = args[static_cast<size_t>(CommonArgIdx::FUNC)];
GateRef argc = args[static_cast<size_t>(CommonArgIdx::ACTUAL_ARGC)];
LowerFastCall(hirGate, glue_, func, argc, args, argsFastCall, &res, &exit, false);
builder_.Bind(&exit);
GateRef stateInGate = builder_.GetState();
GateRef dependInGate = builder_.GetDepend();
const CallSignature *cs = RuntimeStubCSigns::Get(RTSTUB_ID(JSCall));
GateRef target = builder_.IntPtr(RTSTUB_ID(JSCall));
GateRef callGate = builder_.Call(cs, glue_, target, dependInGate, args, hirGate);
ReplaceHirWithPendingException(hirGate, stateInGate, callGate, callGate);
GateRef depend = builder_.GetDepend();
ReplaceHirWithPendingException(hirGate, stateInGate, depend, *res);
}
void SlowPathLowering::ReplaceHirWithPendingException(GateRef hirGate,
@ -917,7 +923,7 @@ void SlowPathLowering::LowerCallArg0(GateRef gate)
GateRef newTarget = builder_.Undefined();
GateRef thisObj = builder_.Undefined();
GateRef func = acc_.GetValueIn(gate, 0);
LowerToJSCall(gate, {glue_, actualArgc, func, newTarget, thisObj});
LowerToJSCall(gate, {glue_, actualArgc, func, newTarget, thisObj}, {glue_, func, thisObj});
}
void SlowPathLowering::LowerCallthisrangeImm8Imm8V8(GateRef gate)
@ -937,7 +943,12 @@ void SlowPathLowering::LowerCallthisrangeImm8Imm8V8(GateRef gate)
for (size_t i = fixedInputsNum; i < numIns - callTargetIndex; i++) {
vec.emplace_back(acc_.GetValueIn(gate, i));
}
LowerToJSCall(gate, vec);
std::vector<GateRef> vec1 { glue_, callTarget, thisObj };
// add common args
for (size_t i = fixedInputsNum; i < numIns - callTargetIndex; i++) {
vec1.emplace_back(acc_.GetValueIn(gate, i));
}
LowerToJSCall(gate, vec, vec1);
}
void SlowPathLowering::LowerWideCallthisrangePrefImm16V8(GateRef gate)
@ -957,7 +968,12 @@ void SlowPathLowering::LowerWideCallthisrangePrefImm16V8(GateRef gate)
for (size_t i = fixedInputsNum; i < numIns - callTargetIndex; i++) {
vec.emplace_back(acc_.GetValueIn(gate, i));
}
LowerToJSCall(gate, vec);
std::vector<GateRef> vec1 {glue_, callTarget, thisObj};
// add common args
for (size_t i = fixedInputsNum; i < numIns - callTargetIndex; i++) {
vec1.emplace_back(acc_.GetValueIn(gate, i));
}
LowerToJSCall(gate, vec, vec1);
}
void SlowPathLowering::LowerCallSpread(GateRef gate)
@ -982,11 +998,14 @@ void SlowPathLowering::LowerCallrangeImm8Imm8V8(GateRef gate)
GateRef newTarget = builder_.Undefined();
GateRef thisObj = builder_.Undefined();
std::vector<GateRef> vec {glue_, actualArgc, callTarget, newTarget, thisObj};
for (size_t i = 0; i < numArgs - callTargetIndex; i++) { // 2: skip acc
vec.emplace_back(acc_.GetValueIn(gate, i));
}
LowerToJSCall(gate, vec);
std::vector<GateRef> vec1 {glue_, callTarget, thisObj};
for (size_t i = 0; i < numArgs - callTargetIndex; i++) { // 2: skip acc
vec1.emplace_back(acc_.GetValueIn(gate, i));
}
LowerToJSCall(gate, vec, vec1);
}
void SlowPathLowering::LowerNewObjApply(GateRef gate)
@ -1654,6 +1673,7 @@ void SlowPathLowering::LowerNewObjRange(GateRef gate)
Label slowPath(&builder_);
Label threadCheck(&builder_);
Label successExit(&builder_);
Label exit(&builder_);
DEFVAlUE(result, (&builder_), VariableType::JS_ANY(), builder_.Undefined());
@ -1676,7 +1696,8 @@ void SlowPathLowering::LowerNewObjRange(GateRef gate)
for (size_t i = 1; i < range; ++i) {
args.emplace_back(acc_.GetValueIn(gate, i));
}
result = LowerCallNGCRuntime(gate, RTSTUB_ID(JSCallNew), args, true);
LowerFastCall(gate, glue_, ctor, actualArgc, args, args, &result, &exit, true);
builder_.Bind(&exit);
result = builder_.CallStub(glue_, gate, CommonStubCSigns::ConstructorCheck, { glue_, ctor, *result, thisObj });
builder_.Jump(&threadCheck);
}
@ -2823,7 +2844,7 @@ void SlowPathLowering::LowerCallthis0Imm8V8(GateRef gate)
GateRef newTarget = builder_.Undefined();
GateRef thisObj = acc_.GetValueIn(gate, 0);
GateRef func = acc_.GetValueIn(gate, 1);
LowerToJSCall(gate, {glue_, actualArgc, func, newTarget, thisObj});
LowerToJSCall(gate, {glue_, actualArgc, func, newTarget, thisObj}, {glue_, func, thisObj});
}
void SlowPathLowering::LowerCallArg1Imm8V8(GateRef gate)
@ -2837,12 +2858,13 @@ void SlowPathLowering::LowerCallArg1Imm8V8(GateRef gate)
GateRef a0Value = acc_.GetValueIn(gate, 0);
GateRef thisObj = builder_.Undefined();
GateRef func = acc_.GetValueIn(gate, 1); // acc
LowerToJSCall(gate, {glue_, actualArgc, func, newTarget, thisObj, a0Value});
LowerToJSCall(gate, {glue_, actualArgc, func, newTarget, thisObj, a0Value}, {glue_, func, thisObj, a0Value});
}
void SlowPathLowering::LowerWideCallrangePrefImm16V8(GateRef gate)
{
std::vector<GateRef> vec;
std::vector<GateRef> vec1;
size_t numIns = acc_.GetNumValueIn(gate);
size_t fixedInputsNum = 1; // 1: acc
ASSERT(acc_.GetNumValueIn(gate) >= fixedInputsNum);
@ -2861,7 +2883,15 @@ void SlowPathLowering::LowerWideCallrangePrefImm16V8(GateRef gate)
for (size_t i = 0; i < numIns - fixedInputsNum; i++) { // skip acc
vec.emplace_back(acc_.GetValueIn(gate, i));
}
LowerToJSCall(gate, vec);
vec.emplace_back(glue_);
vec.emplace_back(callTarget);
vec.emplace_back(thisObj);
// add args
for (size_t i = 0; i < numIns - fixedInputsNum; i++) { // skip acc
vec.emplace_back(acc_.GetValueIn(gate, i));
}
LowerToJSCall(gate, vec, vec1);
}
void SlowPathLowering::LowerCallThisArg1(GateRef gate)
@ -2874,7 +2904,7 @@ void SlowPathLowering::LowerCallThisArg1(GateRef gate)
GateRef thisObj = acc_.GetValueIn(gate, 0);
GateRef a0 = acc_.GetValueIn(gate, 1); // 1:first parameter
GateRef func = acc_.GetValueIn(gate, 2); // 2:function
LowerToJSCall(gate, {glue_, actualArgc, func, newTarget, thisObj, a0});
LowerToJSCall(gate, {glue_, actualArgc, func, newTarget, thisObj, a0}, {glue_, func, thisObj, a0});
}
void SlowPathLowering::LowerCallargs2Imm8V8V8(GateRef gate)
@ -2889,7 +2919,7 @@ void SlowPathLowering::LowerCallargs2Imm8V8V8(GateRef gate)
GateRef a1 = acc_.GetValueIn(gate, 1); // 1:first parameter
GateRef func = acc_.GetValueIn(gate, 2); // 2:function
LowerToJSCall(gate, {glue_, actualArgc, func, newTarget, thisObj, a0, a1});
LowerToJSCall(gate, {glue_, actualArgc, func, newTarget, thisObj, a0, a1}, {glue_, func, thisObj, a0, a1});
}
void SlowPathLowering::LowerCallargs3Imm8V8V8(GateRef gate)
@ -2905,7 +2935,7 @@ void SlowPathLowering::LowerCallargs3Imm8V8V8(GateRef gate)
GateRef a2 = acc_.GetValueIn(gate, 2);
GateRef func = acc_.GetValueIn(gate, 3);
LowerToJSCall(gate, {glue_, actualArgc, func, newTarget, thisObj, a0, a1, a2});
LowerToJSCall(gate, {glue_, actualArgc, func, newTarget, thisObj, a0, a1, a2}, {glue_, func, thisObj, a0, a1, a2});
}
void SlowPathLowering::LowerCallthis2Imm8V8V8V8(GateRef gate)
@ -2920,7 +2950,8 @@ void SlowPathLowering::LowerCallthis2Imm8V8V8V8(GateRef gate)
GateRef a1Value = acc_.GetValueIn(gate, 2);
GateRef func = acc_.GetValueIn(gate, 3); //acc
LowerToJSCall(gate, {glue_, actualArgc, func, newTarget, thisObj, a0Value, a1Value});
LowerToJSCall(gate, {glue_, actualArgc, func, newTarget, thisObj, a0Value, a1Value},
{glue_, func, thisObj, a0Value, a1Value});
}
void SlowPathLowering::LowerCallthis3Imm8V8V8V8V8(GateRef gate)
@ -2935,7 +2966,8 @@ void SlowPathLowering::LowerCallthis3Imm8V8V8V8V8(GateRef gate)
GateRef a1Value = acc_.GetValueIn(gate, 2);
GateRef a2Value = acc_.GetValueIn(gate, 3);
GateRef func = acc_.GetValueIn(gate, 4);
LowerToJSCall(gate, {glue_, actualArgc, func, newTarget, thisObj, a0Value, a1Value, a2Value});
LowerToJSCall(gate, {glue_, actualArgc, func, newTarget, thisObj, a0Value, a1Value, a2Value},
{glue_, func, thisObj, a0Value, a1Value, a2Value});
}
void SlowPathLowering::LowerLdThisByName(GateRef gate)
@ -2961,24 +2993,130 @@ void SlowPathLowering::LowerConstPoolData(GateRef gate)
void SlowPathLowering::LowerConstruct(GateRef gate)
{
Environment env(gate, circuit_, &builder_);
const CallSignature *cs = RuntimeStubCSigns::Get(RTSTUB_ID(JSCallNew));
GateRef target = builder_.IntPtr(RTSTUB_ID(JSCallNew));
size_t num = acc_.GetNumValueIn(gate);
std::vector<GateRef> args(num);
for (size_t i = 0; i < num; ++i) {
args[i] = acc_.GetValueIn(gate, i);
}
GateRef state = builder_.GetState();
auto depend = builder_.GetDepend();
GateRef constructGate = builder_.Call(cs, glue_, target, depend, args, gate);
std::vector<GateRef> argsFastCall(num - 2); // 2:skip argc newtarget
size_t j = 0;
for (size_t i = 0; i < num; ++i) {
if (i != 1 && i != 3) { // 3: newtarget index
argsFastCall[j++] = acc_.GetValueIn(gate, i);
}
}
GateRef ctor = acc_.GetValueIn(gate, static_cast<size_t>(CommonArgIdx::FUNC));
GateRef argc = acc_.GetValueIn(gate, static_cast<size_t>(CommonArgIdx::ACTUAL_ARGC));
Label exit(&builder_);
DEFVAlUE(res, (&builder_), VariableType::JS_ANY(), builder_.Undefined());
LowerFastCall(gate, glue_, ctor, argc, args, argsFastCall, &res, &exit, true);
builder_.Bind(&exit);
GateRef thisObj = acc_.GetValueIn(gate, static_cast<size_t>(CommonArgIdx::THIS_OBJECT));
GateRef result = builder_.CallStub(
glue_, gate, CommonStubCSigns::ConstructorCheck, { glue_, ctor, constructGate, thisObj });
glue_, gate, CommonStubCSigns::ConstructorCheck, { glue_, ctor, *res, thisObj });
GateRef state = builder_.GetState();
ReplaceHirWithPendingException(gate, state, result, result);
}
void SlowPathLowering::LowerTypedAotCall(GateRef gate)
// Emits the runtime dispatch for a JS call site, trying the fastest available
// entry first and degrading gracefully:
//   1. fast-call AOT code with matching argc        -> direct OptimizedFastCall
//   2. fast-call AOT code, too few actual args      -> OptimizedFastCallAndPushUndefined bridge
//   3. plain AOT code with matching argc            -> direct OptimizedCall
//   4. plain AOT code, too few actual args          -> OptimizedCallAndPushUndefined bridge
//   5. anything else (non-heap-object, non-JSFunction,
//      wrong constructor-ness, no AOT code)         -> JSCall / JSCallNew runtime stub
// gate:          the hir call gate being lowered
// glue:          VM glue pointer threaded through every call
// func:          callee value (checked at runtime)
// argc:          actual argument count incl. the mandatory func/newTarget/this slots
// args:          full argument list for the standard calling convention
// argsFastCall:  reduced list (no argc/newTarget) for the fast calling convention
// result/exit:   out-variable and join label the caller binds after this returns
// isNew:         true when lowering a construct site (class constructors only);
//                note the fast-call path (1-2) is emitted only for non-construct calls
void SlowPathLowering::LowerFastCall(GateRef gate, GateRef glue, GateRef func, GateRef argc,
                                     const std::vector<GateRef> &args, const std::vector<GateRef> &argsFastCall,
                                     Variable *result, Label *exit, bool isNew)
{
    Label isHeapObject(&builder_);
    Label isJsFunction(&builder_);  // renamed from "isJsFcuntion" (typo)
    Label fastCall(&builder_);
    Label notFastCall(&builder_);
    Label call(&builder_);
    Label call1(&builder_);
    Label slowCall(&builder_);
    Label callBridge(&builder_);
    Label callBridge1(&builder_);
    Label slowPath(&builder_);
    Label notCallConstructor(&builder_);
    Label isCallConstructor(&builder_);
    builder_.Branch(builder_.TaggedIsHeapObject(func), &isHeapObject, &slowPath);
    builder_.Bind(&isHeapObject);
    {
        builder_.Branch(builder_.IsJSFunction(func), &isJsFunction, &slowPath);
        builder_.Bind(&isJsFunction);
        {
            // A normal call must not hit a class constructor, and a construct
            // site must hit one; the mismatching case falls to the slow path,
            // which raises the proper error.
            if (!isNew) {
                builder_.Branch(builder_.IsClassConstructor(func), &slowPath, &notCallConstructor);
                builder_.Bind(&notCallConstructor);
            } else {
                builder_.Branch(builder_.IsClassConstructor(func), &isCallConstructor, &slowPath);
                builder_.Bind(&isCallConstructor);
            }
            GateRef method = builder_.GetMethodFromFunction(func);
            if (!isNew) {
                builder_.Branch(builder_.HasAotCodeAndFastCall(method), &fastCall, &notFastCall);
                builder_.Bind(&fastCall);
                {
                    GateRef expectedArgc = builder_.Int64Add(builder_.GetExpectedNumOfArgs(method),
                        builder_.Int64(NUM_MANDATORY_JSFUNC_ARGS));
                    builder_.Branch(builder_.Int64LessThanOrEqual(expectedArgc, argc), &call, &callBridge);
                    builder_.Bind(&call);
                    {
                        // Enough actual args: jump straight to the AOT entry
                        // using the fast calling convention.
                        GateRef code = builder_.GetCodeAddr(method);
                        auto depend = builder_.GetDepend();
                        const CallSignature *cs = RuntimeStubCSigns::GetOptimizedFastCallSign();
                        result->WriteVariable(builder_.Call(cs, glue, code, depend, argsFastCall, gate));
                        builder_.Jump(exit);
                    }
                    builder_.Bind(&callBridge);
                    {
                        // Too few actual args: the bridge stub pads missing
                        // parameters with undefined before entering AOT code.
                        const CallSignature *cs = RuntimeStubCSigns::Get(RTSTUB_ID(OptimizedFastCallAndPushUndefined));
                        GateRef target = builder_.IntPtr(RTSTUB_ID(OptimizedFastCallAndPushUndefined));
                        auto depend = builder_.GetDepend();
                        result->WriteVariable(builder_.Call(cs, glue, target, depend, args, gate));
                        builder_.Jump(exit);
                    }
                }
                builder_.Bind(&notFastCall);
            }
            builder_.Branch(builder_.HasAotCode(method), &slowCall, &slowPath);
            builder_.Bind(&slowCall);
            {
                GateRef expectedArgc = builder_.Int64Add(builder_.GetExpectedNumOfArgs(method),
                    builder_.Int64(NUM_MANDATORY_JSFUNC_ARGS));
                builder_.Branch(builder_.Int64LessThanOrEqual(expectedArgc, argc), &call1, &callBridge1);
                builder_.Bind(&call1);
                {
                    // AOT code without the fast-call flag: use the standard
                    // optimized calling convention.
                    GateRef code = builder_.GetCodeAddr(method);
                    auto depend = builder_.GetDepend();
                    const CallSignature *cs = RuntimeStubCSigns::GetOptimizedCallSign();
                    result->WriteVariable(builder_.Call(cs, glue, code, depend, args, gate));
                    builder_.Jump(exit);
                }
                builder_.Bind(&callBridge1);
                {
                    const CallSignature *cs = RuntimeStubCSigns::Get(RTSTUB_ID(OptimizedCallAndPushUndefined));
                    GateRef target = builder_.IntPtr(RTSTUB_ID(OptimizedCallAndPushUndefined));
                    auto depend = builder_.GetDepend();
                    result->WriteVariable(builder_.Call(cs, glue, target, depend, args, gate));
                    builder_.Jump(exit);
                }
            }
        }
    }
    builder_.Bind(&slowPath);
    {
        // Fallback: let the generic runtime stub handle everything
        // (non-function callees, interpreter-only methods, errors).
        if (isNew) {
            const CallSignature *cs = RuntimeStubCSigns::Get(RTSTUB_ID(JSCallNew));
            GateRef target = builder_.IntPtr(RTSTUB_ID(JSCallNew));
            auto depend = builder_.GetDepend();
            result->WriteVariable(builder_.Call(cs, glue, target, depend, args, gate));
            builder_.Jump(exit);
        } else {
            const CallSignature *cs = RuntimeStubCSigns::Get(RTSTUB_ID(JSCall));
            GateRef target = builder_.IntPtr(RTSTUB_ID(JSCall));
            auto depend = builder_.GetDepend();
            result->WriteVariable(builder_.Call(cs, glue, target, depend, args, gate));
            builder_.Jump(exit);
        }
    }
}
void SlowPathLowering::LowerTypedCall(GateRef gate)
{
Environment env(gate, circuit_, &builder_);
GateRef func = acc_.GetValueIn(gate, static_cast<size_t>(CommonArgIdx::FUNC));
@ -2991,7 +3129,25 @@ void SlowPathLowering::LowerTypedAotCall(GateRef gate)
}
GateRef state = builder_.GetState();
auto depend = builder_.GetDepend();
const CallSignature *cs = RuntimeStubCSigns::GetAotCallSign();
const CallSignature *cs = RuntimeStubCSigns::GetOptimizedCallSign();
GateRef result = builder_.Call(cs, glue_, code, depend, args, gate);
ReplaceHirWithPendingException(gate, state, result, result);
}
// Lowers a TYPEDFASTCALL gate: type checks earlier in the pipeline have
// already proven the callee is an AOT-compiled fast-call JSFunction, so the
// call is emitted directly against the method's code address using the
// optimized fast-call signature — no runtime dispatch or argc bridging here.
// Exceptions raised by the callee are propagated via the pending-exception
// replacement at the end.
void SlowPathLowering::LowerTypedFastCall(GateRef gate)
{
    Environment env(gate, circuit_, &builder_);
    // FastCallArgIdx::FUNC locates the callee inside the gate's value inputs.
    GateRef func = acc_.GetValueIn(gate, static_cast<size_t>(FastCallArgIdx::FUNC));
    GateRef method = builder_.GetMethodFromFunction(func);
    GateRef code = builder_.GetCodeAddr(method);
    // Forward all value inputs of the gate unchanged as call arguments.
    size_t num = acc_.GetNumValueIn(gate);
    std::vector<GateRef> args(num);
    for (size_t i = 0; i < num; ++i) {
        args[i] = acc_.GetValueIn(gate, i);
    }
    GateRef state = builder_.GetState();
    auto depend = builder_.GetDepend();
    const CallSignature *cs = RuntimeStubCSigns::GetOptimizedFastCallSign();
    GateRef result = builder_.Call(cs, glue_, code, depend, args, gate);
    // Replace the hir gate and route a possibly-pending exception.
    ReplaceHirWithPendingException(gate, state, result, result);
}

View File

@ -169,7 +169,9 @@ private:
void LowerTryLdGlobalByName(GateRef gate);
void LowerGetIterator(GateRef gate);
void LowerGetAsyncIterator(GateRef gate);
void LowerToJSCall(GateRef gate, const std::vector<GateRef> &args);
void LowerToJSCall(GateRef hirGate, const std::vector<GateRef> &args, const std::vector<GateRef> &argsFastCall);
void LowerFastCall(GateRef gate, GateRef glue, GateRef func, GateRef argc, const std::vector<GateRef> &args,
const std::vector<GateRef> &fastCallArgs, Variable *result, Label *exit, bool isNew);
void LowerCallArg0(GateRef gate);
void LowerCallArg1Imm8V8(GateRef gate);
void LowerCallThisArg1(GateRef gate);
@ -297,7 +299,8 @@ private:
void LowerLdThisByName(GateRef gate);
void LowerConstPoolData(GateRef gate);
void LowerConstruct(GateRef gate);
void LowerTypedAotCall(GateRef gate);
void LowerTypedCall(GateRef gate);
void LowerTypedFastCall(GateRef gate);
void LowerUpdateHotness(GateRef gate);
void LowerNotifyConcurrentResult(GateRef gate);
void LowerGetEnv(GateRef gate);

View File

@ -216,6 +216,23 @@ inline GateRef StubBuilder::CallNGCRuntime(GateRef glue, int index, const std::i
return result;
}
// Emits a direct call to AOT code at 'code' using the optimized fast-call
// convention; thin delegation to the circuit builder (no hir gate, hence
// Circuit::NullGate()).
inline GateRef StubBuilder::FastCallOptimized(GateRef glue, GateRef code, const std::initializer_list<GateRef>& args)
{
    GateRef result = env_->GetBuilder()->FastCallOptimized(glue, code, Gate::InvalidGateRef, args, Circuit::NullGate());
    return result;
}
// Emits a direct call to AOT code at 'code' using the standard optimized
// calling convention; thin delegation to the circuit builder.
inline GateRef StubBuilder::CallOptimized(GateRef glue, GateRef code, const std::initializer_list<GateRef>& args)
{
    GateRef result = env_->GetBuilder()->CallOptimized(glue, code, Gate::InvalidGateRef, args, Circuit::NullGate());
    return result;
}
// Returns the AOT-compiled machine-code entry address stored on 'method'.
inline GateRef StubBuilder::GetAotCodeAddr(GateRef method)
{
    return env_->GetBuilder()->GetCodeAddr(method);
}
inline GateRef StubBuilder::CallStub(GateRef glue, int index, const std::initializer_list<GateRef>& args)
{
SavePcIfNeeded(glue);
@ -2112,6 +2129,11 @@ inline GateRef StubBuilder::HasAotCode(GateRef method)
Int64(0));
}
// True when 'method' both has AOT code and was compiled with the fast-call
// convention; delegates the bitfield test to the circuit builder.
inline GateRef StubBuilder::HasAotCodeAndFastCall(GateRef method)
{
    return env_->GetBuilder()->HasAotCodeAndFastCall(method);
}
inline GateRef StubBuilder::GetExpectedNumOfArgs(GateRef method)
{
GateRef callFieldOffset = IntPtr(Method::CALL_FIELD_OFFSET);

View File

@ -4858,6 +4858,7 @@ GateRef StubBuilder::JSCallDispatch(GateRef glue, GateRef func, GateRef actualNu
FUNC_CALL_BACK(func)
Label funcIsClassConstructor(env);
Label funcNotClassConstructor(env);
Label methodNotAot(env);
if (!AssemblerModule::IsCallNew(mode)) {
Branch(IsClassConstructorFromBitField(bitfield), &funcIsClassConstructor, &funcNotClassConstructor);
Bind(&funcIsClassConstructor);
@ -4866,97 +4867,349 @@ GateRef StubBuilder::JSCallDispatch(GateRef glue, GateRef func, GateRef actualNu
Jump(&exit);
}
Bind(&funcNotClassConstructor);
} else {
GateRef hclass = LoadHClass(func);
GateRef bitfield1 = Load(VariableType::INT32(), hclass, IntPtr(JSHClass::BIT_FIELD_OFFSET));
Branch(IsClassConstructorFromBitField(bitfield1), &funcIsClassConstructor, &methodNotAot);
Bind(&funcIsClassConstructor);
}
GateRef sp = 0;
if (env->IsAsmInterp()) {
sp = Argument(static_cast<size_t>(InterpreterHandlerInputs::SP));
}
Label methodisAot(env);
Label methodNotAot(env);
Label methodIsFastCall(env);
Label methodNotFastCall(env);
Label fastCall(env);
Label fastCallBridge(env);
Label slowCall(env);
Label slowCallBridge(env);
{
GateRef isAotMask = Int64(static_cast<uint64_t>(1) << MethodLiteral::IsAotCodeBit::START_BIT);
Branch(Int64Equal(Int64And(callField, isAotMask), Int64(0)), &methodNotAot, &methodisAot);
Bind(&methodisAot);
GateRef newTarget = Undefined();
GateRef thisValue = Undefined();
GateRef realNumArgs = Int64Add(ZExtInt32ToInt64(actualNumArgs), Int64(NUM_MANDATORY_JSFUNC_ARGS));
GateRef isFastMask = Int64(0x5LL << MethodLiteral::IsAotCodeBit::START_BIT);
Branch(Int64Equal(Int64And(callField, isFastMask), isFastMask), &methodIsFastCall, &methodNotFastCall);
Bind(&methodIsFastCall);
{
GateRef newTarget = Undefined();
GateRef thisValue = Undefined();
GateRef realNumArgs = Int64Add(ZExtInt32ToInt64(actualNumArgs), Int64(NUM_MANDATORY_JSFUNC_ARGS));
switch (mode) {
case JSCallMode::CALL_THIS_ARG0:
thisValue = data[0];
[[fallthrough]];
case JSCallMode::CALL_ARG0:
case JSCallMode::DEPRECATED_CALL_ARG0:
result = CallNGCRuntime(glue, RTSTUB_ID(JSCall),
{ glue, realNumArgs, func, newTarget, thisValue});
Jump(&exit);
break;
case JSCallMode::CALL_THIS_ARG1:
thisValue = data[1];
[[fallthrough]];
case JSCallMode::CALL_ARG1:
case JSCallMode::DEPRECATED_CALL_ARG1:
result = CallNGCRuntime(glue, RTSTUB_ID(JSCall),
{ glue, realNumArgs, func, newTarget, thisValue, data[0] });
Jump(&exit);
break;
case JSCallMode::CALL_THIS_ARG2:
thisValue = data[2];
[[fallthrough]];
case JSCallMode::CALL_ARG2:
case JSCallMode::DEPRECATED_CALL_ARG2:
result = CallNGCRuntime(glue, RTSTUB_ID(JSCall),
{ glue, realNumArgs, func, newTarget, thisValue, data[0], data[1] });
Jump(&exit);
break;
case JSCallMode::CALL_THIS_ARG3:
thisValue = data[3];
[[fallthrough]];
case JSCallMode::CALL_ARG3:
case JSCallMode::DEPRECATED_CALL_ARG3:
result = CallNGCRuntime(glue, RTSTUB_ID(JSCall),
{ glue, realNumArgs, func, newTarget, thisValue,
data[0], data[1], data[2] }); // 2: args2
Jump(&exit);
break;
case JSCallMode::CALL_THIS_WITH_ARGV:
case JSCallMode::CALL_THIS_ARGV_WITH_RETURN:
case JSCallMode::DEPRECATED_CALL_THIS_WITH_ARGV:
thisValue = data[2]; // 2: this input
[[fallthrough]];
case JSCallMode::CALL_WITH_ARGV:
case JSCallMode::DEPRECATED_CALL_WITH_ARGV:
result = CallNGCRuntime(glue, RTSTUB_ID(JSCallWithArgV),
{ glue, ZExtInt32ToInt64(actualNumArgs), func, newTarget, thisValue, data[1] });
Jump(&exit);
break;
case JSCallMode::DEPRECATED_CALL_CONSTRUCTOR_WITH_ARGV:
case JSCallMode::CALL_CONSTRUCTOR_WITH_ARGV:
result = CallNGCRuntime(glue, RTSTUB_ID(JSCallNewWithArgV),
{ glue, ZExtInt32ToInt64(actualNumArgs), func, func, data[2], data[1]});
result = ConstructorCheck(glue, func, *result, data[2]); // 2: the second index
Jump(&exit);
break;
case JSCallMode::CALL_GETTER:
result = CallNGCRuntime(glue, RTSTUB_ID(JSCall),
{ glue, realNumArgs, func, newTarget, data[0]});
Jump(&exit);
break;
case JSCallMode::CALL_SETTER:
result = CallNGCRuntime(glue, RTSTUB_ID(JSCall),
{ glue, realNumArgs, func, newTarget, data[0], data[1]});
Jump(&exit);
break;
case JSCallMode::CALL_THIS_ARG3_WITH_RETURN:
result = CallNGCRuntime(glue, RTSTUB_ID(JSCall),
{ glue, realNumArgs, func, newTarget, data[0], data[1], data[2], data[3] });
Jump(&exit);
break;
default:
LOG_ECMA(FATAL) << "this branch is unreachable";
UNREACHABLE();
GateRef expectedNum = Int64And(Int64LSR(callField, Int64(MethodLiteral::NumArgsBits::START_BIT)),
Int64((1LU << MethodLiteral::NumArgsBits::SIZE) - 1));
GateRef expectedArgc = Int64Add(expectedNum, Int64(NUM_MANDATORY_JSFUNC_ARGS));
Branch(Int64LessThanOrEqual(expectedArgc, realNumArgs), &fastCall, &fastCallBridge);
Bind(&fastCall);
{
GateRef code = GetAotCodeAddr(method);
switch (mode) {
case JSCallMode::CALL_THIS_ARG0:
thisValue = data[0];
[[fallthrough]];
case JSCallMode::CALL_ARG0:
case JSCallMode::DEPRECATED_CALL_ARG0:
result = FastCallOptimized(glue, code, { glue, func, thisValue});
Jump(&exit);
break;
case JSCallMode::CALL_THIS_ARG1:
thisValue = data[1];
[[fallthrough]];
case JSCallMode::CALL_ARG1:
case JSCallMode::DEPRECATED_CALL_ARG1:
result = FastCallOptimized(glue, code, { glue, func, thisValue, data[0] });
Jump(&exit);
break;
case JSCallMode::CALL_THIS_ARG2:
thisValue = data[2];
[[fallthrough]];
case JSCallMode::CALL_ARG2:
case JSCallMode::DEPRECATED_CALL_ARG2:
result = FastCallOptimized(glue, code, { glue, func, thisValue, data[0], data[1] });
Jump(&exit);
break;
case JSCallMode::CALL_THIS_ARG3:
thisValue = data[3];
[[fallthrough]];
case JSCallMode::CALL_ARG3:
case JSCallMode::DEPRECATED_CALL_ARG3:
result = FastCallOptimized(glue, code, { glue, func, thisValue, data[0], data[1], data[2] });
Jump(&exit);
break;
case JSCallMode::CALL_THIS_WITH_ARGV:
case JSCallMode::CALL_THIS_ARGV_WITH_RETURN:
case JSCallMode::DEPRECATED_CALL_THIS_WITH_ARGV:
thisValue = data[2]; // 2: this input
[[fallthrough]];
case JSCallMode::CALL_WITH_ARGV:
case JSCallMode::DEPRECATED_CALL_WITH_ARGV:
result = CallNGCRuntime(glue, RTSTUB_ID(JSFastCallWithArgV),
{ glue, func, thisValue, ZExtInt32ToInt64(actualNumArgs), data[1] });
Jump(&exit);
break;
case JSCallMode::DEPRECATED_CALL_CONSTRUCTOR_WITH_ARGV:
case JSCallMode::CALL_CONSTRUCTOR_WITH_ARGV:
result = CallNGCRuntime(glue, RTSTUB_ID(JSFastCallWithArgV),
{ glue, func, data[2], ZExtInt32ToInt64(actualNumArgs), data[1]});
result = ConstructorCheck(glue, func, *result, data[2]); // 2: the second index
Jump(&exit);
break;
case JSCallMode::CALL_GETTER:
result = FastCallOptimized(glue, code, { glue, func, data[0] });
Jump(&exit);
break;
case JSCallMode::CALL_SETTER:
result = FastCallOptimized(glue, code, { glue, func, data[0], data[1] });
Jump(&exit);
break;
case JSCallMode::CALL_THIS_ARG3_WITH_RETURN:
result = FastCallOptimized(glue, code, { glue, func, data[0], data[1], data[2], data[3] });
Jump(&exit);
break;
default:
LOG_ECMA(FATAL) << "this branch is unreachable";
UNREACHABLE();
}
}
Bind(&fastCallBridge);
{
switch (mode) {
case JSCallMode::CALL_THIS_ARG0:
thisValue = data[0];
[[fallthrough]];
case JSCallMode::CALL_ARG0:
case JSCallMode::DEPRECATED_CALL_ARG0:
result = CallNGCRuntime(glue, RTSTUB_ID(OptimizedFastCallAndPushUndefined),
{ glue, realNumArgs, func, newTarget, thisValue});
Jump(&exit);
break;
case JSCallMode::CALL_THIS_ARG1:
thisValue = data[1];
[[fallthrough]];
case JSCallMode::CALL_ARG1:
case JSCallMode::DEPRECATED_CALL_ARG1:
result = CallNGCRuntime(glue, RTSTUB_ID(OptimizedFastCallAndPushUndefined),
{ glue, realNumArgs, func, newTarget, thisValue, data[0] });
Jump(&exit);
break;
case JSCallMode::CALL_THIS_ARG2:
thisValue = data[2];
[[fallthrough]];
case JSCallMode::CALL_ARG2:
case JSCallMode::DEPRECATED_CALL_ARG2:
result = CallNGCRuntime(glue, RTSTUB_ID(OptimizedFastCallAndPushUndefined),
{ glue, realNumArgs, func, newTarget, thisValue, data[0], data[1] });
Jump(&exit);
break;
case JSCallMode::CALL_THIS_ARG3:
thisValue = data[3];
[[fallthrough]];
case JSCallMode::CALL_ARG3:
case JSCallMode::DEPRECATED_CALL_ARG3:
result = CallNGCRuntime(glue, RTSTUB_ID(OptimizedFastCallAndPushUndefined),
{ glue, realNumArgs, func, newTarget, thisValue,
data[0], data[1], data[2] }); // 2: args2
Jump(&exit);
break;
case JSCallMode::CALL_THIS_WITH_ARGV:
case JSCallMode::CALL_THIS_ARGV_WITH_RETURN:
case JSCallMode::DEPRECATED_CALL_THIS_WITH_ARGV:
thisValue = data[2]; // 2: this input
[[fallthrough]];
case JSCallMode::CALL_WITH_ARGV:
case JSCallMode::DEPRECATED_CALL_WITH_ARGV:
result = CallNGCRuntime(glue, RTSTUB_ID(JSFastCallWithArgVAndPushUndefined),
{ glue, func, thisValue, ZExtInt32ToInt64(actualNumArgs), data[1], expectedNum });
Jump(&exit);
break;
case JSCallMode::DEPRECATED_CALL_CONSTRUCTOR_WITH_ARGV:
case JSCallMode::CALL_CONSTRUCTOR_WITH_ARGV:
result = CallNGCRuntime(glue, RTSTUB_ID(JSFastCallWithArgVAndPushUndefined),
{ glue, func, data[2], ZExtInt32ToInt64(actualNumArgs), data[1], expectedNum });
result = ConstructorCheck(glue, func, *result, data[2]); // 2: the second index
Jump(&exit);
break;
case JSCallMode::CALL_GETTER:
result = CallNGCRuntime(glue, RTSTUB_ID(OptimizedFastCallAndPushUndefined),
{ glue, realNumArgs, func, newTarget, data[0]});
Jump(&exit);
break;
case JSCallMode::CALL_SETTER:
result = CallNGCRuntime(glue, RTSTUB_ID(OptimizedFastCallAndPushUndefined),
{ glue, realNumArgs, func, newTarget, data[0], data[1]});
Jump(&exit);
break;
case JSCallMode::CALL_THIS_ARG3_WITH_RETURN:
result = CallNGCRuntime(glue, RTSTUB_ID(OptimizedFastCallAndPushUndefined),
{ glue, realNumArgs, func, newTarget, data[0], data[1], data[2], data[3] });
Jump(&exit);
break;
default:
LOG_ECMA(FATAL) << "this branch is unreachable";
UNREACHABLE();
}
}
}
Bind(&methodNotFastCall);
GateRef isAotMask = Int64(static_cast<uint64_t>(1) << MethodLiteral::IsAotCodeBit::START_BIT);
Branch(Int64Equal(Int64And(callField, isAotMask), Int64(0)), &methodNotAot, &methodisAot);
Bind(&methodisAot);
{
GateRef expectedNum = Int64And(Int64LSR(callField, Int64(MethodLiteral::NumArgsBits::START_BIT)),
Int64((1LU << MethodLiteral::NumArgsBits::SIZE) - 1));
GateRef expectedArgc = Int64Add(expectedNum, Int64(NUM_MANDATORY_JSFUNC_ARGS));
Branch(Int64LessThanOrEqual(expectedArgc, realNumArgs), &slowCall, &slowCallBridge);
Bind(&slowCall);
{
GateRef code = GetAotCodeAddr(method);
switch (mode) {
case JSCallMode::CALL_THIS_ARG0:
thisValue = data[0];
[[fallthrough]];
case JSCallMode::CALL_ARG0:
case JSCallMode::DEPRECATED_CALL_ARG0:
result = CallOptimized(glue, code, { glue, realNumArgs, func, newTarget, thisValue });
Jump(&exit);
break;
case JSCallMode::CALL_THIS_ARG1:
thisValue = data[1];
[[fallthrough]];
case JSCallMode::CALL_ARG1:
case JSCallMode::DEPRECATED_CALL_ARG1:
result = CallOptimized(glue, code, { glue, realNumArgs, func, newTarget, thisValue, data[0] });
Jump(&exit);
break;
case JSCallMode::CALL_THIS_ARG2:
thisValue = data[2];
[[fallthrough]];
case JSCallMode::CALL_ARG2:
case JSCallMode::DEPRECATED_CALL_ARG2:
result = CallOptimized(glue, code,
{ glue, realNumArgs, func, newTarget, thisValue, data[0], data[1] });
Jump(&exit);
break;
case JSCallMode::CALL_THIS_ARG3:
thisValue = data[3];
[[fallthrough]];
case JSCallMode::CALL_ARG3:
case JSCallMode::DEPRECATED_CALL_ARG3:
result = CallOptimized(glue, code, { glue, realNumArgs, func, newTarget, thisValue,
data[0], data[1], data[2] });
Jump(&exit);
break;
case JSCallMode::CALL_THIS_WITH_ARGV:
case JSCallMode::CALL_THIS_ARGV_WITH_RETURN:
case JSCallMode::DEPRECATED_CALL_THIS_WITH_ARGV:
thisValue = data[2]; // 2: this input
[[fallthrough]];
case JSCallMode::CALL_WITH_ARGV:
case JSCallMode::DEPRECATED_CALL_WITH_ARGV:
result = CallNGCRuntime(glue, RTSTUB_ID(JSCallWithArgV),
{ glue, ZExtInt32ToInt64(actualNumArgs), func, newTarget, thisValue, data[1] });
Jump(&exit);
break;
case JSCallMode::DEPRECATED_CALL_CONSTRUCTOR_WITH_ARGV:
case JSCallMode::CALL_CONSTRUCTOR_WITH_ARGV:
result = CallNGCRuntime(glue, RTSTUB_ID(JSCallWithArgV),
{ glue, ZExtInt32ToInt64(actualNumArgs), func, func, data[2], data[1]});
result = ConstructorCheck(glue, func, *result, data[2]); // 2: the second index
Jump(&exit);
break;
case JSCallMode::CALL_GETTER:
result = CallOptimized(glue, code, { glue, realNumArgs, func, newTarget, data[0] });
Jump(&exit);
break;
case JSCallMode::CALL_SETTER:
result = CallOptimized(glue, code, { glue, realNumArgs, func, newTarget, data[0], data[1] });
Jump(&exit);
break;
case JSCallMode::CALL_THIS_ARG3_WITH_RETURN:
result = CallOptimized(glue, code,
{ glue, realNumArgs, func, newTarget, data[0], data[1], data[2], data[3] });
Jump(&exit);
break;
default:
LOG_ECMA(FATAL) << "this branch is unreachable";
UNREACHABLE();
}
}
Bind(&slowCallBridge);
{
switch (mode) {
case JSCallMode::CALL_THIS_ARG0:
thisValue = data[0];
[[fallthrough]];
case JSCallMode::CALL_ARG0:
case JSCallMode::DEPRECATED_CALL_ARG0:
result = CallNGCRuntime(glue, RTSTUB_ID(OptimizedCallAndPushUndefined),
{ glue, realNumArgs, func, newTarget, thisValue});
Jump(&exit);
break;
case JSCallMode::CALL_THIS_ARG1:
thisValue = data[1];
[[fallthrough]];
case JSCallMode::CALL_ARG1:
case JSCallMode::DEPRECATED_CALL_ARG1:
result = CallNGCRuntime(glue, RTSTUB_ID(OptimizedCallAndPushUndefined),
{ glue, realNumArgs, func, newTarget, thisValue, data[0] });
Jump(&exit);
break;
case JSCallMode::CALL_THIS_ARG2:
thisValue = data[2];
[[fallthrough]];
case JSCallMode::CALL_ARG2:
case JSCallMode::DEPRECATED_CALL_ARG2:
result = CallNGCRuntime(glue, RTSTUB_ID(OptimizedCallAndPushUndefined),
{ glue, realNumArgs, func, newTarget, thisValue, data[0], data[1] });
Jump(&exit);
break;
case JSCallMode::CALL_THIS_ARG3:
thisValue = data[3];
[[fallthrough]];
case JSCallMode::CALL_ARG3:
case JSCallMode::DEPRECATED_CALL_ARG3:
result = CallNGCRuntime(glue, RTSTUB_ID(OptimizedCallAndPushUndefined),
{ glue, realNumArgs, func, newTarget, thisValue,
data[0], data[1], data[2] }); // 2: args2
Jump(&exit);
break;
case JSCallMode::CALL_THIS_WITH_ARGV:
case JSCallMode::CALL_THIS_ARGV_WITH_RETURN:
case JSCallMode::DEPRECATED_CALL_THIS_WITH_ARGV:
thisValue = data[2]; // 2: this input
[[fallthrough]];
case JSCallMode::CALL_WITH_ARGV:
case JSCallMode::DEPRECATED_CALL_WITH_ARGV:
result = CallNGCRuntime(glue, RTSTUB_ID(JSCallWithArgVAndPushUndefined),
{ glue, ZExtInt32ToInt64(actualNumArgs), func, newTarget, thisValue, data[1] });
Jump(&exit);
break;
case JSCallMode::DEPRECATED_CALL_CONSTRUCTOR_WITH_ARGV:
case JSCallMode::CALL_CONSTRUCTOR_WITH_ARGV:
result = CallNGCRuntime(glue, RTSTUB_ID(JSCallWithArgVAndPushUndefined),
{ glue, ZExtInt32ToInt64(actualNumArgs), func, func, data[2], data[1]});
result = ConstructorCheck(glue, func, *result, data[2]); // 2: the second index
Jump(&exit);
break;
case JSCallMode::CALL_GETTER:
result = CallNGCRuntime(glue, RTSTUB_ID(OptimizedCallAndPushUndefined),
{ glue, realNumArgs, func, newTarget, data[0]});
Jump(&exit);
break;
case JSCallMode::CALL_SETTER:
result = CallNGCRuntime(glue, RTSTUB_ID(OptimizedCallAndPushUndefined),
{ glue, realNumArgs, func, newTarget, data[0], data[1]});
Jump(&exit);
break;
case JSCallMode::CALL_THIS_ARG3_WITH_RETURN:
result = CallNGCRuntime(glue, RTSTUB_ID(OptimizedCallAndPushUndefined),
{ glue, realNumArgs, func, newTarget, data[0], data[1], data[2], data[3] });
Jump(&exit);
break;
default:
LOG_ECMA(FATAL) << "this branch is unreachable";
UNREACHABLE();
}
}
}
Bind(&methodNotAot);
if (jumpSize != 0) {
SaveJumpSizeIfNeeded(glue, jumpSize);

View File

@ -153,6 +153,9 @@ public:
GateRef CallRuntime(GateRef glue, int index, const std::initializer_list<GateRef>& args);
GateRef CallRuntime(GateRef glue, int index, GateRef argc, GateRef argv);
GateRef CallNGCRuntime(GateRef glue, int index, const std::initializer_list<GateRef>& args);
GateRef FastCallOptimized(GateRef glue, GateRef code, const std::initializer_list<GateRef>& args);
GateRef CallOptimized(GateRef glue, GateRef code, const std::initializer_list<GateRef>& args);
GateRef GetAotCodeAddr(GateRef method);
GateRef CallStub(GateRef glue, int index, const std::initializer_list<GateRef>& args);
GateRef CallBuiltinRuntime(GateRef glue, const std::initializer_list<GateRef>& args,
bool isNew = false, const char* comment = nullptr);
@ -585,6 +588,7 @@ public:
GateRef GetMethodFromJSFunction(GateRef jsfunc);
GateRef IsNativeMethod(GateRef method);
GateRef HasAotCode(GateRef method);
GateRef HasAotCodeAndFastCall(GateRef method);
GateRef GetExpectedNumOfArgs(GateRef method);
GateRef GetMethod(GateRef glue, GateRef obj, GateRef key, GateRef profileTypeInfo, GateRef slotId);
// proxy operator

View File

@ -70,7 +70,7 @@ public:
static void JSFunctionEntry(ExtendedAssembler *assembler);
static void OptimizedCallOptimized(ExtendedAssembler *assembler);
static void OptimizedCallAndPushUndefined(ExtendedAssembler *assembler);
static void CallBuiltinTrampoline(ExtendedAssembler *assembler);
@ -78,26 +78,26 @@ public:
static void JSCall(ExtendedAssembler *assembler);
static void CallOptimized(ExtendedAssembler *assembler);
static void CallRuntimeWithArgv(ExtendedAssembler *assembler);
static void JSCallWithArgV(ExtendedAssembler *assembler);
static void JSCallWithArgVAndPushUndefined(ExtendedAssembler *assembler);
static void DeoptHandlerAsm(ExtendedAssembler *assembler);
static void JSCallNew(ExtendedAssembler *assembler);
static void JSCallNewWithArgV(ExtendedAssembler *assembler);
static void GenJSCall(ExtendedAssembler *assembler, bool isNew);
static void GenJSCallWithArgV(ExtendedAssembler *assembler, bool isNew);
static void GenJSCallWithArgV(ExtendedAssembler *assembler, bool needAddExpectedArgs);
private:
static void DeoptEnterAsmInterp(ExtendedAssembler *assembler);
static void JSCallCheck(ExtendedAssembler *assembler, Register jsfunc, Register taggedValue,
Label *nonCallable, Label *notJSFunction);
static void ThrowNonCallableInternal(ExtendedAssembler *assembler, Register sp);
static void CallOptimziedMethodInternal(ExtendedAssembler *assembler, Register jsfunc, Register actualArgC,
Register callField, Register sp);
static void JSBoundFunctionCallInternal(ExtendedAssembler *assembler, Register glue,
Register actualArgC, Register jsfunc, int stubId);
static void JSProxyCallInternal(ExtendedAssembler *assembler, Register sp, Register jsfunc);
@ -115,6 +115,18 @@ private:
static void PushAsmBridgeFrame(ExtendedAssembler *assembler);
static void PopOptimizedFrame(ExtendedAssembler *assembler);
static void JSCallInternal(ExtendedAssembler *assembler, Register jsfunc, bool isNew = false);
friend class OptimizedFastCall;
};
// Trampolines for the AOT "fast call" convention: js arguments are handed over in
// registers (cc calling convention) instead of being pushed as a webkit_jscc frame.
class OptimizedFastCall : public CommonCall {
public:
    // Runtime entry point: builds a JSFunctionEntry frame, then forwards to JSFastCallWithArgV.
    static void OptimizedFastCallEntry(ExtendedAssembler *assembler);
    // Calls an AOT fast-call method, filling absent trailing arguments with undefined.
    static void OptimizedFastCallAndPushUndefined(ExtendedAssembler *assembler);
    // Spreads an argv array into the fast-call argument registers and calls the code entry.
    static void JSFastCallWithArgV(ExtendedAssembler *assembler);
    // As JSFastCallWithArgV, but also pads with undefined up to the callee's expected argc.
    static void JSFastCallWithArgVAndPushUndefined(ExtendedAssembler *assembler);
};
class AsmInterpreterCall : public CommonCall {

View File

@ -120,7 +120,6 @@ void OptimizedCall::IncreaseStackForArguments(ExtendedAssembler *assembler, Regi
// %x1 - actualNumArgs
// %x2 - argV
// %x3 - prevFp
// %x4 - callType
//
// * The JSFunctionEntry Frame's structure is illustrated as the following:
// +--------------------------+
@ -139,14 +138,10 @@ void OptimizedCall::JSFunctionEntry(ExtendedAssembler *assembler)
Register glueReg(X0);
Register argV(X2);
Register prevFpReg(X3);
Register flag(X4);
Register sp(SP);
Register tmpArgV(X7);
Label lJSCallNewWithArgV;
Label lPopFrame;
PushJSFunctionEntryFrame (assembler, prevFpReg);
__ Mov(Register(X6), flag);
__ Mov(tmpArgV, argV);
__ Mov(Register(X20), glueReg);
__ Ldr(Register(X2), MemoryOperand(tmpArgV, 0));
@ -154,58 +149,68 @@ void OptimizedCall::JSFunctionEntry(ExtendedAssembler *assembler)
__ Ldr(Register(X4), MemoryOperand(tmpArgV, DOUBLE_SLOT_SIZE));
__ Add(tmpArgV, tmpArgV, Immediate(TRIPLE_SLOT_SIZE));
__ Mov(Register(X5), tmpArgV);
__ Cmp(Register(X6), Immediate(1));
__ B(Condition::EQ, &lJSCallNewWithArgV);
__ CallAssemblerStub(RTSTUB_ID(JSCallWithArgV), false);
__ B(&lPopFrame);
__ Bind(&lJSCallNewWithArgV);
{
__ CallAssemblerStub(RTSTUB_ID(JSCallNewWithArgV), false);
}
__ Bind(&lPopFrame);
__ Mov(Register(X2), Register(X20));
PopJSFunctionEntryFrame(assembler, Register(X2));
__ Ret();
}
// * uint64_t OptimizedCallOptimized(uintptr_t glue, uint32_t expectedNumArgs, uint32_t actualNumArgs,
// uintptr_t codeAddr, uintptr_t argv)
// * Arguments wil CC calling convention:
// %x0 - glue
// %x1 - codeAddr
// %x2 - actualNumArgs
// %x3 - expectedNumArgs
// %x4 - argv
// * uint64_t OptimizedCallAndPushUndefined(uintptr_t glue, uint32_t argc, JSTaggedType calltarget, JSTaggedType new,
// JSTaggedType this, arg[0], arg[1], arg[2], ..., arg[N-1])
// * webkit_jscc calling convention call js function()
//
// * The OptimizedJSFunctionArgsConfig Frame's structure is illustrated as the following:
// +--------------------------+
// | arg[N-1] |
// +--------------------------+
// | . . . . |
// +--------------------------+
// | arg[0] |
// +--------------------------+
// | argC |
// sp ---> +--------------------------+ -----------------
// | | ^
// | prevFP | |
// |--------------------------| OptimizedJSFunctionArgsConfigFrame
// | frameType | |
// | | V
// +--------------------------+ -----------------
void OptimizedCall::OptimizedCallOptimized(ExtendedAssembler *assembler)
// * OptimizedJSFunctionFrame layout description as the following:
// +--------------------------+
// | arg[N-1] |
// +--------------------------+
// | ... |
// +--------------------------+
// | arg[1] |
// +--------------------------+
// | arg[0] |
// +--------------------------+
// | this |
// +--------------------------+
// | new-target |
// +--------------------------+
// | call-target |
// |--------------------------|
// | argc |
// |--------------------------| ---------------
// | returnAddr | ^
// sp ----> |--------------------------| |
// | callsiteFp | |
// |--------------------------| OptimizedJSFunctionFrame
// | frameType | |
// |--------------------------| |
// | call-target | v
// +--------------------------+ ---------------
void OptimizedCall::OptimizedCallAndPushUndefined(ExtendedAssembler *assembler)
{
__ BindAssemblerStub(RTSTUB_ID(OptimizedCallOptimized));
Register glue(X0);
__ BindAssemblerStub(RTSTUB_ID(OptimizedCallAndPushUndefined));
Register sp(SP);
Register jsfunc(X7);
Register method(X6);
Register expectedNumArgs(X1);
Register actualNumArgs(X2);
Register codeAddr(X3);
Register argV(X4);
__ Ldr(jsfunc, MemoryOperand(sp, FRAME_SLOT_SIZE));
__ Ldr(method, MemoryOperand(jsfunc, JSFunction::METHOD_OFFSET));
__ Ldr(codeAddr, MemoryOperand(method, Method::CODE_ENTRY_OFFSET));
__ Ldr(expectedNumArgs, MemoryOperand(method, Method::CALL_FIELD_OFFSET));
__ Lsr(expectedNumArgs, expectedNumArgs, MethodLiteral::NumArgsBits::START_BIT);
__ And(expectedNumArgs, expectedNumArgs,
LogicalImmediate::Create(
MethodLiteral::NumArgsBits::Mask() >> MethodLiteral::NumArgsBits::START_BIT, RegXSize));
__ Add(expectedNumArgs, expectedNumArgs, Immediate(NUM_MANDATORY_JSFUNC_ARGS));
__ Add(argV, sp, Immediate(kungfu::ArgumentAccessor::GetExtraArgsNum() * FRAME_SLOT_SIZE)); // skip numArgs
__ Ldr(actualNumArgs, MemoryOperand(sp, 0));
Register glue(X0);
Register currentSp(X5);
Register sp(SP);
Label copyArguments;
Label invokeCompiledJSFunction;
@ -382,7 +387,6 @@ void OptimizedCall::JSCallInternal(ExtendedAssembler *assembler, Register jsfunc
Register callField(X3);
Register actualArgC(X4);
Label callNativeMethod;
Label callOptimizedMethod;
Label lCallConstructor;
Label lCallBuiltinStub;
Label lCallNativeCpp;
@ -396,7 +400,6 @@ void OptimizedCall::JSCallInternal(ExtendedAssembler *assembler, Register jsfunc
if (!isNew) {
__ Tbnz(Register(X5), JSHClass::ClassConstructorBit::START_BIT, &lCallConstructor);
}
__ Tbnz(callField, MethodLiteral::IsAotCodeBit::START_BIT, &callOptimizedMethod);
{
Register argV(X5);
// skip argc
@ -470,10 +473,6 @@ void OptimizedCall::JSCallInternal(ExtendedAssembler *assembler, Register jsfunc
}
}
__ Bind(&callOptimizedMethod);
{
CallOptimziedMethodInternal(assembler, jsfunc, actualArgC, callField, sp);
}
Label jsBoundFunction;
Label jsProxy;
__ Bind(&notJSFunction);
@ -574,32 +573,6 @@ void OptimizedCall::ThrowNonCallableInternal(ExtendedAssembler *assembler, Regis
__ Ret();
}
void OptimizedCall::CallOptimziedMethodInternal(ExtendedAssembler *assembler, Register jsfunc, Register actualArgC,
Register callField, Register sp)
{
Register expectedNumArgs(X1, W);
Register arg2(X2);
Register codeAddress(X3);
Register argV(X4);
Register method(X5);
Label directCallCodeEntry;
__ Mov(Register(X5), jsfunc);
__ Mov(arg2, actualArgC);
__ Lsr(callField, callField, MethodLiteral::NumArgsBits::START_BIT);
__ And(callField.W(), callField.W(),
LogicalImmediate::Create(
MethodLiteral::NumArgsBits::Mask() >> MethodLiteral::NumArgsBits::START_BIT, RegWSize));
__ Add(expectedNumArgs, callField.W(), Immediate(NUM_MANDATORY_JSFUNC_ARGS));
__ Cmp(arg2.W(), expectedNumArgs);
__ Add(argV, sp, Immediate(kungfu::ArgumentAccessor::GetExtraArgsNum() * FRAME_SLOT_SIZE)); // skip numArgs
__ Ldr(method, MemoryOperand(Register(X5), JSFunctionBase::METHOD_OFFSET)); // get method
__ Ldr(codeAddress, MemoryOperand(method, Method::CODE_ENTRY_OFFSET)); // get codeAddress
__ B(Condition::HS, &directCallCodeEntry);
__ CallAssemblerStub(RTSTUB_ID(OptimizedCallOptimized), true);
__ Bind(&directCallCodeEntry);
__ Br(codeAddress);
}
void OptimizedCall::JSBoundFunctionCallInternal(ExtendedAssembler *assembler, Register glue,
Register actualArgC, Register jsfunc, int stubId)
{
@ -616,6 +589,9 @@ void OptimizedCall::JSBoundFunctionCallInternal(ExtendedAssembler *assembler, Re
Register realArgC(X7, W);
Label copyBoundArgument;
Label pushCallTarget;
Label popArgs;
Label slowCall;
Label aotCall;
// get bound arguments
__ Ldr(boundLength, MemoryOperand(jsfunc, JSBoundFunction::BOUND_ARGUMENTS_OFFSET));
// get bound length
@ -646,11 +622,11 @@ void OptimizedCall::JSBoundFunctionCallInternal(ExtendedAssembler *assembler, Re
PushArgsWithArgv(assembler, glue, boundLength, boundArgs, tmp, fp, nullptr, nullptr);
}
}
Register boundTarget(X7);
Register newTarget(X6);
__ Bind(&pushCallTarget);
{
Register thisObj(X4);
Register newTarget(X6);
Register boundTarget(X7);
__ Ldr(thisObj, MemoryOperand(jsfunc, JSBoundFunction::BOUND_THIS_OFFSET));
__ Mov(newTarget, Immediate(JSTaggedValue::VALUE_UNDEFINED));
// 2 : 2 means pair
@ -659,8 +635,39 @@ void OptimizedCall::JSBoundFunctionCallInternal(ExtendedAssembler *assembler, Re
// 2 : 2 means pair
__ Stp(Register(X19), boundTarget, MemoryOperand(fp, -FRAME_SLOT_SIZE * 2, AddrMode::PREINDEX));
}
__ CallAssemblerStub(stubId, false);
JSCallCheck(assembler, boundTarget, Register(X9), &slowCall, &slowCall);
Register hclass = __ AvailableRegister2();
__ Ldr(hclass, MemoryOperand(boundTarget, 0));
__ Ldr(hclass, MemoryOperand(hclass, JSHClass::BIT_FIELD_OFFSET));
__ Tbnz(hclass, JSHClass::ClassConstructorBit::START_BIT, &slowCall);
Register callField(X9);
__ Ldr(Register(X8), MemoryOperand(boundTarget, JSFunction::METHOD_OFFSET));
__ Ldr(callField, MemoryOperand(Register(X8), Method::CALL_FIELD_OFFSET));
__ Tbnz(callField, MethodLiteral::IsAotCodeBit::START_BIT, &aotCall);
__ Bind(&aotCall);
{
// output: glue:x0 argc:x1 calltarget:x2 argv:x3 this:x4 newtarget:x5
__ Mov(Register(X1), Register(X19));
__ Mov(Register(X2), boundTarget);
__ Add(X3, fp, Immediate(4 * FRAME_SLOT_SIZE)); // 4: skip argc and func new this
__ Mov(Register(X5), Register(X6));
Register boundCallInternalId(X9);
Register baseAddress(X8);
Register codeAddress(X10);
__ Mov(baseAddress, Immediate(JSThread::GlueData::GetCOStubEntriesOffset(false)));
__ Mov(boundCallInternalId, Immediate(CommonStubCSigns::JsBoundCallInternal));
__ Add(codeAddress, X0, baseAddress);
__ Ldr(codeAddress, MemoryOperand(codeAddress, boundCallInternalId, UXTW, FRAME_SLOT_SIZE_LOG2));
__ Blr(codeAddress);
__ B(&popArgs);
}
__ Bind(&slowCall);
{
__ CallAssemblerStub(stubId, false);
__ B(&popArgs);
}
__ Bind(&popArgs);
PopJSFunctionArgs(assembler, Register(X19), Register(X19));
PopOptimizedArgsConfigFrame(assembler);
__ Ret();
@ -919,19 +926,15 @@ void OptimizedCall::PopOptimizedUnfoldArgVFrame(ExtendedAssembler *assembler)
//
// * OptimizedJSFunctionFrame layout description as the following:
// +--------------------------+
// | arg[N-1] |
// +--------------------------+
// | . . . . . |
// +--------------------------+
// | arg[0] |
// +--------------------------+
// | this |
// +--------------------------+
// | new-target |
// +--------------------------+
// | call-target |
// | argn |
// |--------------------------|
// | argc |
// | argn - 1 |
// |--------------------------|
// | ..... |
// |--------------------------|
// | arg2 |
// |--------------------------|
// | arg1 |
// sp ----> |--------------------------| ---------------
// | returnAddr | ^
// |--------------------------| |
@ -942,7 +945,7 @@ void OptimizedCall::PopOptimizedUnfoldArgVFrame(ExtendedAssembler *assembler)
// | call-target | v
// +--------------------------+ ---------------
void OptimizedCall::GenJSCallWithArgV(ExtendedAssembler *assembler, bool isNew)
void OptimizedCall::GenJSCallWithArgV(ExtendedAssembler *assembler, bool needAddExpectedArgs)
{
Register sp(SP);
Register glue(X0);
@ -973,10 +976,10 @@ void OptimizedCall::GenJSCallWithArgV(ExtendedAssembler *assembler, bool isNew)
PushMandatoryJSArgs(assembler, jsfunc, thisObj, newTarget, currentSp);
__ Str(actualNumArgs, MemoryOperand(currentSp, -FRAME_SLOT_SIZE, AddrMode::PREINDEX));
if (isNew) {
__ CallAssemblerStub(RTSTUB_ID(JSCallNew), false);
if (needAddExpectedArgs) {
__ CallAssemblerStub(RTSTUB_ID(OptimizedCallAndPushUndefined), false);
} else {
__ CallAssemblerStub(RTSTUB_ID(JSCall), false);
__ CallAssemblerStub(RTSTUB_ID(CallOptimized), false);
}
__ Ldr(actualNumArgs, MemoryOperand(sp, 0));
@ -985,9 +988,19 @@ void OptimizedCall::GenJSCallWithArgV(ExtendedAssembler *assembler, bool isNew)
__ Ret();
}
void OptimizedCall::JSCallNewWithArgV(ExtendedAssembler *assembler)
// * uint64_t JSCallWithArgVAndPushUndefined(uintptr_t glue, uint32_t argc, JSTaggedType calltarget,
// JSTaggedType new, JSTaggedType this, argV)
// * cc calling convention call js function()
// * arguments:
// %x0 - glue
// %x1 - argc
// %x2 - call-target
// %x3 - new-target
// %x4 - this
// %x5 - argv
void OptimizedCall::JSCallWithArgVAndPushUndefined(ExtendedAssembler *assembler)
{
__ BindAssemblerStub(RTSTUB_ID(JSCallNewWithArgV));
__ BindAssemblerStub(RTSTUB_ID(JSCallWithArgVAndPushUndefined));
GenJSCallWithArgV(assembler, true);
}
@ -997,6 +1010,19 @@ void OptimizedCall::JSCallWithArgV(ExtendedAssembler *assembler)
GenJSCallWithArgV(assembler, false);
}
// * Tail-calls the AOT code entry of a js function whose webkit_jscc frame has
//   already been laid out by the caller (argc at [sp], call-target at [sp + 8]).
void OptimizedCall::CallOptimized(ExtendedAssembler *assembler)
{
    __ BindAssemblerStub(RTSTUB_ID(CallOptimized));
    Register stackPointer(SP);
    Register callTarget(X7);
    Register methodReg(X6);
    Register entryPoint(X5);
    // The call-target sits one slot above argc on the caller-built frame.
    __ Ldr(callTarget, MemoryOperand(stackPointer, FRAME_SLOT_SIZE));
    __ Ldr(methodReg, MemoryOperand(callTarget, JSFunction::METHOD_OFFSET));
    __ Ldr(entryPoint, MemoryOperand(methodReg, Method::CODE_ENTRY_OFFSET));
    // Branch (not call): the callee returns straight to our caller.
    __ Br(entryPoint);
}
void OptimizedCall::DeoptEnterAsmInterp(ExtendedAssembler *assembler)
{
// rdi

View File

@ -0,0 +1,461 @@
/*
* Copyright (c) 2023 Huawei Device Co., Ltd.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "ecmascript/compiler/trampoline/aarch64/common_call.h"
#include "ecmascript/compiler/assembler/assembler.h"
#include "ecmascript/compiler/argument_accessor.h"
#include "ecmascript/compiler/common_stubs.h"
#include "ecmascript/compiler/rt_call_signature.h"
#include "ecmascript/deoptimizer/deoptimizer.h"
#include "ecmascript/ecma_runtime_call_info.h"
#include "ecmascript/frames.h"
#include "ecmascript/js_function.h"
#include "ecmascript/method.h"
#include "ecmascript/js_thread.h"
#include "ecmascript/message_string.h"
#include "ecmascript/runtime_call_id.h"
namespace panda::ecmascript::aarch64 {
using Label = panda::ecmascript::Label;
#define __ assembler->
// * uint64_t OptimizedFastCallEntry(uintptr_t glue, uint32_t actualNumArgs, const JSTaggedType argV[],
//                                   uintptr_t prevFp)
// * Fast-call counterpart of JSFunctionEntry: pushes the entry frame, shuffles the
//   incoming C arguments into the fast-call register layout and forwards to
//   JSFastCallWithArgV.
// * Arguments:
//        %x0 - glue
//        %x1 - actualNumArgs
//        %x2 - argV (argV[0] = call-target, argV[1] = this, argV[2..] = js args)
//        %x3 - prevFp
void OptimizedFastCall::OptimizedFastCallEntry(ExtendedAssembler *assembler)
{
    __ BindAssemblerStub(RTSTUB_ID(OptimizedFastCallEntry));
    Register glue(X0);
    Register actualArgc(X1);
    Register actualArgV(X2);
    Register callerFp(X3);
    Register sp(SP);
    OptimizedCall::PushJSFunctionEntryFrame(assembler, callerFp);
    // Free x1/x2 for the callee's func/this by parking argc in x3 and argv in x4.
    __ Mov(Register(X3), actualArgc);
    __ Mov(Register(X4), actualArgV);
    Register savedArgc(X3);
    Register savedArgV(X4);
    // Keep glue across the call in callee-saved x20.
    __ Mov(Register(X20), glue);
    __ Ldr(Register(X1), MemoryOperand(savedArgV, 0));                // call-target
    __ Ldr(Register(X2), MemoryOperand(savedArgV, FRAME_SLOT_SIZE));  // this
    __ Add(savedArgV, savedArgV, Immediate(DOUBLE_SLOT_SIZE));        // step past func/this
    __ CallAssemblerStub(RTSTUB_ID(JSFastCallWithArgV), false);
    __ Mov(Register(X2), Register(X20));
    OptimizedCall::PopJSFunctionEntryFrame(assembler, Register(X2));
    __ Ret();
}
// * uint64_t OptimizedFastCallAndPushUndefined(uintptr_t glue, uint32_t actualNumArgs, JSTaggedType calltarget,
//                                              JSTaggedType new, JSTaggedType this, arg0, arg1, arg2, ...)
// * Arguments with CC calling convention:
//        %x0 - glue
//        %x1 - actualNumArgs
//        %x2 - func (call-target)
//        %x3 - new-target
//        %x4 - this
//        %x5 - arg0
//        %x6 - arg1
//        %x7 - arg2
//        further args were spilled by the caller; they are read through argV.
//
// Repacks the incoming arguments into the fast-call register layout
// (x0 glue, x1 func, x2 this, x3.. js args, new-target dropped), pads any missing
// trailing arguments with VALUE_UNDEFINED up to the method's expected arg count,
// then calls the method's AOT code entry.
void OptimizedFastCall::OptimizedFastCallAndPushUndefined(ExtendedAssembler *assembler)
{
    __ BindAssemblerStub(RTSTUB_ID(OptimizedFastCallAndPushUndefined));
    Register glue(X0);
    Register actualNumArgs(X1);
    Register jsfunc(X2);
    Register codeAddr(X3);
    Register sp(SP);
    // NOTE(review): currentSp, op and (below) method all alias AvailableRegister1 —
    // confirm none of them must stay live across another's use.
    Register currentSp = __ AvailableRegister1();
    Register op = __ AvailableRegister1();
    Label call;
    Label arg4;
    Label arg5;
    Label arg6;
    Label argc;
    Label checkExpectedArgs;
    // construct frame
    OptimizedCall::PushOptimizedArgsConfigFrame(assembler);
    __ Mov(__ AvailableRegister3(), Register(X1));
    __ Add(__ AvailableRegister4(), sp, Immediate(4 * FRAME_SLOT_SIZE)); // 4: skip fp, lr, frame type, x19
    Register actualNumArgsReg = __ AvailableRegister3();
    Register argV = __ AvailableRegister4();
    Register method = __ AvailableRegister1();
    Register expectedNumArgs = __ AvailableRegister2();
    // expectedNumArgs = NumArgsBits(method->callField) + func/new-target/this.
    __ Ldr(method, MemoryOperand(jsfunc, JSFunction::METHOD_OFFSET));
    __ Ldr(expectedNumArgs, MemoryOperand(method, Method::CALL_FIELD_OFFSET));
    __ Lsr(expectedNumArgs, expectedNumArgs, MethodLiteral::NumArgsBits::START_BIT);
    __ And(expectedNumArgs, expectedNumArgs,
        LogicalImmediate::Create(
            MethodLiteral::NumArgsBits::Mask() >> MethodLiteral::NumArgsBits::START_BIT, RegXSize));
    __ Add(expectedNumArgs, expectedNumArgs, Immediate(NUM_MANDATORY_JSFUNC_ARGS));
    Label arg7;
    Label arg8;
    // Switch to the fast-call layout: x1 <- func, x2 <- this (new-target is dropped).
    __ Mov(Register(X1), Register(X2)); // x1 (was argc) now holds func
    __ Mov(Register(X2), Register(X4)); // x2 (was func) now holds this
    jsfunc = Register(X1);
    // Unrolled dispatch on actualNumArgs: slide each present arg two registers down
    // (arg0: x5 -> x3, arg1: x6 -> x4, ...) and fill the remainder with undefined.
    __ Cmp(actualNumArgsReg, Immediate(3)); // 3: only func/new-target/this present
    __ B(Condition::NE, &arg4);
    __ Mov(Register(X3), Immediate(JSTaggedValue::VALUE_UNDEFINED));
    __ Mov(Register(X4), Immediate(JSTaggedValue::VALUE_UNDEFINED));
    __ Mov(Register(X5), Immediate(JSTaggedValue::VALUE_UNDEFINED));
    __ Mov(Register(X6), Immediate(JSTaggedValue::VALUE_UNDEFINED));
    __ Mov(Register(X7), Immediate(JSTaggedValue::VALUE_UNDEFINED));
    __ B(&checkExpectedArgs);
    __ Bind(&arg4);
    {
        __ Mov(Register(X3), Register(X5)); // arg0 into its fast-call slot
        __ Cmp(actualNumArgsReg, Immediate(4)); // 4: 4 args
        __ B(Condition::NE, &arg5);
        __ Mov(Register(X4), Immediate(JSTaggedValue::VALUE_UNDEFINED));
        __ Mov(Register(X5), Immediate(JSTaggedValue::VALUE_UNDEFINED));
        __ Mov(Register(X6), Immediate(JSTaggedValue::VALUE_UNDEFINED));
        __ Mov(Register(X7), Immediate(JSTaggedValue::VALUE_UNDEFINED));
        __ B(&checkExpectedArgs);
    }
    __ Bind(&arg5);
    {
        __ Mov(Register(X4), Register(X6)); // arg1 into its fast-call slot
        __ Cmp(actualNumArgsReg, Immediate(5)); // 5: 5 args
        __ B(Condition::NE, &arg6);
        __ Mov(Register(X5), Immediate(JSTaggedValue::VALUE_UNDEFINED));
        __ Mov(Register(X6), Immediate(JSTaggedValue::VALUE_UNDEFINED));
        __ Mov(Register(X7), Immediate(JSTaggedValue::VALUE_UNDEFINED));
        __ B(&checkExpectedArgs);
    }
    __ Bind(&arg6);
    {
        __ Mov(Register(X5), Register(X7)); // arg2 into its fast-call slot
        __ Cmp(actualNumArgsReg, Immediate(6)); // 6: 6 args
        __ B(Condition::NE, &arg7);
        __ Mov(Register(X6), Immediate(JSTaggedValue::VALUE_UNDEFINED));
        __ Mov(Register(X7), Immediate(JSTaggedValue::VALUE_UNDEFINED));
        __ B(&checkExpectedArgs);
    }
    __ Bind(&arg7);
    {
        __ Ldr(op, MemoryOperand(argV, 0)); // arg3 comes from the caller's stack
        __ Mov(Register(X6), op);
        __ Add(argV, argV, Immediate(FRAME_SLOT_SIZE));
        __ Cmp(actualNumArgsReg, Immediate(7)); // 7: 7 args
        __ B(Condition::NE, &arg8);
        __ Mov(Register(X7), Immediate(JSTaggedValue::VALUE_UNDEFINED));
        __ B(&checkExpectedArgs);
    }
    __ Bind(&arg8);
    {
        __ Ldr(op, MemoryOperand(argV, 0)); // arg4 comes from the caller's stack
        __ Mov(Register(X7), op);
        __ Add(argV, argV, Immediate(FRAME_SLOT_SIZE));
        __ Cmp(actualNumArgsReg, Immediate(8)); // 8: 8 args
        __ B(Condition::NE, &argc);
        __ B(&checkExpectedArgs);
    }
    // More than 8 register-passed values: push (expected - actual) undefineds plus
    // the remaining stack args. NOTE(review): assumes actual <= expected here —
    // confirm callers guarantee this, otherwise tmp below goes negative.
    __ Bind(&argc);
    {
        __ Sub(expectedNumArgs, expectedNumArgs, Immediate(8)); // 8 : register save 8 arg
        __ Sub(actualNumArgsReg, actualNumArgsReg, Immediate(8)); // 8 : register save 8 arg
        OptimizedCall::IncreaseStackForArguments(assembler, expectedNumArgs, currentSp);
        TempRegister1Scope scope1(assembler);
        TempRegister2Scope scope2(assembler);
        Register tmp = __ TempRegister1();
        Register undefinedValue = __ TempRegister2();
        __ Sub(tmp, expectedNumArgs, actualNumArgsReg);
        PushUndefinedWithArgc(assembler, glue, tmp, undefinedValue, currentSp, nullptr, nullptr);
        PushArgsWithArgv(assembler, glue, actualNumArgsReg, argV, undefinedValue, currentSp, nullptr, nullptr);
        __ B(&call);
    }
    // All actual args fit in registers; spill undefineds only if the callee
    // expects more than the 8 register-passed values.
    __ Bind(&checkExpectedArgs);
    {
        __ Cmp(expectedNumArgs, Immediate(8)); // 8 : register save 8 arg
        __ B(Condition::LS, &call);
        __ Sub(expectedNumArgs, expectedNumArgs, Immediate(8)); // 8 : register save 8 arg
        OptimizedCall::IncreaseStackForArguments(assembler, expectedNumArgs, currentSp);
        TempRegister2Scope scope2(assembler);
        Register undefinedValue = __ TempRegister2();
        PushUndefinedWithArgc(assembler, glue, expectedNumArgs, undefinedValue, currentSp, nullptr, nullptr);
        __ B(&call);
    }
    // Load the code entry from the method and call it; x1 still holds func.
    __ Bind(&call);
    TempRegister1Scope scope1(assembler);
    Register method1 = __ TempRegister1();
    __ Ldr(method1, MemoryOperand(jsfunc, JSFunction::METHOD_OFFSET));
    __ Ldr(X11, MemoryOperand(method1, Method::CODE_ENTRY_OFFSET));
    __ Blr(X11);
    __ Mov(Register(SP), Register(FP));
    __ RestoreFpAndLr();
    __ Ret();
}
// * uint64_t JSFastCallWithArgV(uintptr_t glue, JSTaggedType calltarget, JSTaggedType this,
//                               uint32_t actualNumArgs, const JSTaggedType argV[])
// * cc calling convention call js function()
// * arguments:
//        %x0 - glue
//        %x1 - call-target
//        %x2 - this
//        %x3 - actual argc
//        %x4 - argv
//
// Spreads up to five argv entries into the fast-call argument registers x3..x7;
// any further entries are pushed onto the stack, then the method's AOT code
// entry is called. No undefined-padding is done here (see
// JSFastCallWithArgVAndPushUndefined for that).
void OptimizedFastCall::JSFastCallWithArgV(ExtendedAssembler *assembler)
{
    __ BindAssemblerStub(RTSTUB_ID(JSFastCallWithArgV));
    Register sp(SP);
    Register glue(X0);
    Register actualNumArgs(X3);
    Register jsfunc(X1);
    Register thisObj(X2);
    Register currentSp = __ AvailableRegister1();
    Register callsiteSp = __ AvailableRegister2();
    Label call;
    __ Mov(callsiteSp, sp);
    OptimizedCall::PushOptimizedUnfoldArgVFrame(assembler, callsiteSp);
    TempRegister2Scope scope2(assembler);
    Register op = __ TempRegister2();
    // Work on copies so x3/x4 can be overwritten with js args below.
    Register argC = __ AvailableRegister3();
    Register argV = __ AvailableRegister4();
    __ Mov(argC, actualNumArgs);
    __ Mov(argV, Register(X4));
    __ Cmp(argC, Immediate(0));
    __ B(Condition::EQ, &call);
    __ Ldr(op, MemoryOperand(argV, 0));
    __ Mov(Register(X3), op); // first arg
    __ Add(argV, argV, Immediate(FRAME_SLOT_SIZE));
    __ Sub(argC, argC, Immediate(1));
    __ Cmp(argC, Immediate(0));
    __ B(Condition::EQ, &call);
    __ Ldr(op, MemoryOperand(argV, 0));
    __ Mov(Register(X4), op); // second arg
    __ Add(argV, argV, Immediate(FRAME_SLOT_SIZE));
    __ Sub(argC, argC, Immediate(1));
    __ Cmp(argC, Immediate(0));
    __ B(Condition::EQ, &call);
    __ Ldr(op, MemoryOperand(argV, 0));
    __ Mov(Register(X5), op); // third arg
    __ Add(argV, argV, Immediate(FRAME_SLOT_SIZE));
    __ Sub(argC, argC, Immediate(1));
    __ Cmp(argC, Immediate(0));
    __ B(Condition::EQ, &call);
    __ Ldr(op, MemoryOperand(argV, 0));
    __ Mov(Register(X6), op); // fourth arg
    __ Add(argV, argV, Immediate(FRAME_SLOT_SIZE));
    __ Sub(argC, argC, Immediate(1));
    __ Cmp(argC, Immediate(0));
    __ B(Condition::EQ, &call);
    __ Ldr(op, MemoryOperand(argV, 0));
    __ Mov(Register(X7), op); // fifth arg
    __ Add(argV, argV, Immediate(FRAME_SLOT_SIZE));
    __ Sub(argC, argC, Immediate(1));
    __ Cmp(argC, Immediate(0));
    __ B(Condition::EQ, &call);
    // Remaining args (beyond the 5 register slots) go onto the stack.
    OptimizedCall::IncreaseStackForArguments(assembler, argC, currentSp);
    PushArgsWithArgv(assembler, glue, argC, argV, op, currentSp, nullptr, nullptr);
    __ Bind(&call);
    TempRegister1Scope scope1(assembler);
    Register method = __ TempRegister1();
    __ Ldr(method, MemoryOperand(jsfunc, JSFunction::METHOD_OFFSET));
    __ Ldr(X11, MemoryOperand(method, Method::CODE_ENTRY_OFFSET));
    __ Blr(X11);
    __ Mov(Register(SP), Register(FP));
    __ RestoreFpAndLr();
    __ Ret();
}
// * Arguments:
//        %x0 - glue
//        %x1 - func
//        %x2 - this
//        %x3 - actualNumArgs
//        %x4 - argv
//        %x5 - expectedNumArgs
//
// Spreads argv into the fast-call registers x3..x7 like JSFastCallWithArgV, but
// additionally pads with VALUE_UNDEFINED up to expectedNumArgs (in registers and,
// when expected > 5, on the stack) before calling the method's AOT code entry.
void OptimizedFastCall::JSFastCallWithArgVAndPushUndefined(ExtendedAssembler *assembler)
{
    __ BindAssemblerStub(RTSTUB_ID(JSFastCallWithArgVAndPushUndefined));
    Register sp(SP);
    Register glue(X0);
    Register jsfunc(X1);
    Register thisObj(X2);
    // NOTE(review): currentSp and op alias AvailableRegister1; expectedNumArgs
    // reuses AvailableRegister2 after callsiteSp is consumed — verify liveness.
    Register currentSp = __ AvailableRegister1();
    Register op = __ AvailableRegister1();
    Register callsiteSp = __ AvailableRegister2();
    Label call;
    Label arg1;
    Label arg2;
    Label arg3;
    Label arg4;
    Label arg5;
    Label argc;
    Label checkExpectedArgs;
    __ Mov(callsiteSp, sp);
    OptimizedCall::PushOptimizedUnfoldArgVFrame(assembler, callsiteSp);
    // Copy the counts/argv out of x3..x5 so those registers can carry js args.
    Register actualNumArgsReg = __ AvailableRegister3();
    Register argV = __ AvailableRegister4();
    Register expectedNumArgs = __ AvailableRegister2();
    __ Mov(actualNumArgsReg, Register(X3));
    __ Mov(argV, Register(X4));
    __ Mov(expectedNumArgs, Register(X5));
    // Unrolled dispatch on actualNumArgs: load each present arg from argv into its
    // register slot and fill the remaining register slots with undefined.
    __ Cmp(actualNumArgsReg, Immediate(0));
    __ B(Condition::NE, &arg1);
    __ Mov(Register(X3), Immediate(JSTaggedValue::VALUE_UNDEFINED));
    __ Mov(Register(X4), Immediate(JSTaggedValue::VALUE_UNDEFINED));
    __ Mov(Register(X5), Immediate(JSTaggedValue::VALUE_UNDEFINED));
    __ Mov(Register(X6), Immediate(JSTaggedValue::VALUE_UNDEFINED));
    __ Mov(Register(X7), Immediate(JSTaggedValue::VALUE_UNDEFINED));
    __ B(&checkExpectedArgs);
    __ Bind(&arg1);
    {
        __ Ldr(op, MemoryOperand(argV, 0)); // arg0 -> x3
        __ Mov(Register(X3), op);
        __ Add(argV, argV, Immediate(FRAME_SLOT_SIZE));
        __ Cmp(actualNumArgsReg, Immediate(1));
        __ B(Condition::NE, &arg2);
        __ Mov(Register(X4), Immediate(JSTaggedValue::VALUE_UNDEFINED));
        __ Mov(Register(X5), Immediate(JSTaggedValue::VALUE_UNDEFINED));
        __ Mov(Register(X6), Immediate(JSTaggedValue::VALUE_UNDEFINED));
        __ Mov(Register(X7), Immediate(JSTaggedValue::VALUE_UNDEFINED));
        __ B(&checkExpectedArgs);
    }
    __ Bind(&arg2);
    {
        __ Ldr(op, MemoryOperand(argV, 0)); // arg1 -> x4
        __ Mov(Register(X4), op);
        __ Add(argV, argV, Immediate(FRAME_SLOT_SIZE));
        __ Cmp(actualNumArgsReg, Immediate(2)); // 2: 2 args
        __ B(Condition::NE, &arg3);
        __ Mov(Register(X5), Immediate(JSTaggedValue::VALUE_UNDEFINED));
        __ Mov(Register(X6), Immediate(JSTaggedValue::VALUE_UNDEFINED));
        __ Mov(Register(X7), Immediate(JSTaggedValue::VALUE_UNDEFINED));
        __ B(&checkExpectedArgs);
    }
    __ Bind(&arg3);
    {
        __ Ldr(op, MemoryOperand(argV, 0)); // arg2 -> x5
        __ Mov(Register(X5), op);
        __ Add(argV, argV, Immediate(FRAME_SLOT_SIZE));
        __ Cmp(actualNumArgsReg, Immediate(3)); // 3: 3 args
        __ B(Condition::NE, &arg4);
        __ Mov(Register(X6), Immediate(JSTaggedValue::VALUE_UNDEFINED));
        __ Mov(Register(X7), Immediate(JSTaggedValue::VALUE_UNDEFINED));
        __ B(&checkExpectedArgs);
    }
    __ Bind(&arg4);
    {
        __ Ldr(op, MemoryOperand(argV, 0)); // arg3 -> x6
        __ Mov(Register(X6), op);
        __ Add(argV, argV, Immediate(FRAME_SLOT_SIZE));
        __ Cmp(actualNumArgsReg, Immediate(4)); // 4: 4 args
        __ B(Condition::NE, &arg5);
        __ Mov(Register(X7), Immediate(JSTaggedValue::VALUE_UNDEFINED));
        __ B(&checkExpectedArgs);
    }
    __ Bind(&arg5);
    {
        __ Ldr(op, MemoryOperand(argV, 0)); // arg4 -> x7
        __ Mov(Register(X7), op);
        __ Add(argV, argV, Immediate(FRAME_SLOT_SIZE));
        __ Cmp(actualNumArgsReg, Immediate(5)); // 5: 5 args
        __ B(Condition::NE, &argc);
        __ B(&checkExpectedArgs);
    }
    // More than 5 actual args: push (expected - actual) undefineds plus the
    // remaining argv entries. NOTE(review): assumes actual <= expected here.
    __ Bind(&argc);
    {
        __ Sub(expectedNumArgs, expectedNumArgs, Immediate(5)); // 5 : register save 5 arg
        __ Sub(actualNumArgsReg, actualNumArgsReg, Immediate(5)); // 5 : register save 5 arg
        OptimizedCall::IncreaseStackForArguments(assembler, expectedNumArgs, currentSp);
        TempRegister1Scope scope1(assembler);
        TempRegister2Scope scope2(assembler);
        Register tmp = __ TempRegister1();
        Register undefinedValue = __ TempRegister2();
        __ Sub(tmp, expectedNumArgs, actualNumArgsReg);
        PushUndefinedWithArgc(assembler, glue, tmp, undefinedValue, currentSp, nullptr, nullptr);
        PushArgsWithArgv(assembler, glue, actualNumArgsReg, argV, undefinedValue, currentSp, nullptr, nullptr);
        __ B(&call);
    }
    // All actual args are in registers; spill undefineds only if the callee
    // expects more than the 5 register-passed js args.
    __ Bind(&checkExpectedArgs);
    {
        __ Cmp(expectedNumArgs, Immediate(5)); // 5 : register save 5 arg
        __ B(Condition::LS, &call);
        __ Sub(expectedNumArgs, expectedNumArgs, Immediate(5)); // 5 : register save 5 arg
        OptimizedCall::IncreaseStackForArguments(assembler, expectedNumArgs, currentSp);
        TempRegister2Scope scope2(assembler);
        Register undefinedValue = __ TempRegister2();
        PushUndefinedWithArgc(assembler, glue, expectedNumArgs, undefinedValue, currentSp, nullptr, nullptr);
        __ B(&call);
    }
    // Load the code entry via the method in x1 (func) and call it.
    __ Bind(&call);
    TempRegister1Scope scope1(assembler);
    Register method = __ TempRegister1();
    __ Ldr(method, MemoryOperand(X1, JSFunction::METHOD_OFFSET));
    __ Ldr(X11, MemoryOperand(method, Method::CODE_ENTRY_OFFSET));
    __ Blr(X11);
    __ Mov(Register(SP), Register(FP));
    __ RestoreFpAndLr();
    __ Ret();
}
#undef __
} // panda::ecmascript::aarch64

View File

@ -46,7 +46,7 @@ public:
static void JSFunctionEntry(ExtendedAssembler *assembler);
static void OptimizedCallOptimized(ExtendedAssembler *assembler);
static void OptimizedCallAndPushUndefined(ExtendedAssembler *assembler);
static void CallBuiltinTrampoline(ExtendedAssembler *assembler);
@ -54,27 +54,26 @@ public:
static void JSCall(ExtendedAssembler *assembler);
static void CallOptimized(ExtendedAssembler *assembler);
static void CallRuntimeWithArgv(ExtendedAssembler *assembler);
static void JSCallWithArgV(ExtendedAssembler *assembler);
static void JSCallWithArgVAndPushUndefined(ExtendedAssembler *assembler);
static void DeoptHandlerAsm(ExtendedAssembler *assembler);
static void JSCallNew(ExtendedAssembler *assembler);
static void JSCallNewWithArgV(ExtendedAssembler *assembler);
static void GenJSCall(ExtendedAssembler *assembler, bool isNew);
static void GenJSCallWithArgV(ExtendedAssembler *assembler, bool isNew);
static void GenJSCallWithArgV(ExtendedAssembler *assembler, bool needAddExpectedArgs);
private:
static void DeoptEnterAsmInterp(ExtendedAssembler *assembler);
static void JSCallCheck(ExtendedAssembler *assembler, Register jsFuncReg,
Label *lNonCallable, Label *lNotJSFunction, Label *lJSFunctionCall);
static void ThrowNonCallableInternal(ExtendedAssembler *assembler, Register glueReg);
static void CallOptimziedMethodInternal(ExtendedAssembler *assembler, Register glueReg, Register jsFuncReg,
Register methodCallField, Register argc, Register codeAddrReg,
Register expectedNumArgsReg);
static void JSBoundFunctionCallInternal(ExtendedAssembler *assembler, Register jsFuncReg, Label *jsCall);
static void JSProxyCallInternal(ExtendedAssembler *assembler, Register jsFuncReg);
static void OptimizedCallAsmInterpreter(ExtendedAssembler *assembler);
@ -87,6 +86,19 @@ private:
static void PopJSFunctionEntryFrame(ExtendedAssembler *assembler, Register glue);
static void PushOptimizedUnfoldArgVFrame(ExtendedAssembler *assembler, Register callSiteSp);
static void PopOptimizedUnfoldArgVFrame(ExtendedAssembler *assembler);
friend class OptimizedFastCall;
};
// Trampolines for the register-based "fast call" convention (glue, func, this,
// args... in registers) — unlike OptimizedCall, no argc/new-target slots are
// materialized. Implemented in trampoline/x64/optimized_fast_call.cpp.
class OptimizedFastCall : public CommonCall {
public:
    // Entry from C++ runtime code: reshuffles (glue, actualNumArgs, argV, prevFp)
    // into fast-call registers and dispatches to JSFastCallWithArgV.
    static void OptimizedFastCallEntry(ExtendedAssembler *assembler);
    // Callee declares more parameters than supplied: pads the tail with undefined
    // before entering the AOT code.
    static void OptimizedFastCallAndPushUndefined(ExtendedAssembler *assembler);
    // Unfolds an argv array into fast-call registers/stack and calls the method.
    static void JSFastCallWithArgV(ExtendedAssembler *assembler);
    // Same as JSFastCallWithArgV, plus undefined padding up to expectedNumArgs.
    static void JSFastCallWithArgVAndPushUndefined(ExtendedAssembler *assembler);
};
class AsmInterpreterCall : public CommonCall {
@ -177,5 +189,103 @@ private:
Label *fastPathEntry, Label *pushCallThis, Label *stackOverflow);
friend class OptimizedCall;
};
// RAII helper that emits the prologue of an OPTIMIZED_JS_FUNCTION_ARGS_CONFIG
// frame on construction and the matching epilogue — including the final Ret —
// on destruction, so a trampoline using it cannot forget to unwind the frame.
class JsFunctionArgsConfigFrameScope {
public:
    static constexpr int FRAME_SLOT_SIZE = 8;
    explicit JsFunctionArgsConfigFrameScope(ExtendedAssembler *assembler) : assembler_(assembler)
    {
        assembler_->Pushq(rbp);
        assembler_->Pushq(static_cast<int32_t>(FrameType::OPTIMIZED_JS_FUNCTION_ARGS_CONFIG_FRAME));
        // 2: skip jsFunc and frameType
        assembler_->Leaq(Operand(rsp, FRAME_SLOT_SIZE), rbp);
        // callee save: r12/r13/r14/rbx preserved for use as scratch; rax slot
        // also pushed (never popped by name, skipped via the rsp rewind below)
        assembler_->Pushq(r12);
        assembler_->Pushq(r13);
        assembler_->Pushq(r14);
        assembler_->Pushq(rbx);
        assembler_->Pushq(rax);
    }
    ~JsFunctionArgsConfigFrameScope()
    {
        // Rewind rsp from the frame base, restore callee-saved registers in
        // reverse order, drop the frame-type slot and return to the caller.
        assembler_->Movq(rbp, rsp);
        assembler_->Addq(-5 * FRAME_SLOT_SIZE, rsp); // -5: get r12 r13 r14 rbx
        assembler_->Popq(rbx);
        assembler_->Popq(r14);
        assembler_->Popq(r13);
        assembler_->Popq(r12);
        assembler_->Addq(FRAME_SLOT_SIZE, rsp); // skip frame type
        assembler_->Pop(rbp);
        assembler_->Ret();
    }
    NO_COPY_SEMANTIC(JsFunctionArgsConfigFrameScope);
    NO_MOVE_SEMANTIC(JsFunctionArgsConfigFrameScope);
private:
    ExtendedAssembler *assembler_;
};
// RAII helper emitting the prologue/epilogue of an
// OPTIMIZED_JS_FUNCTION_UNFOLD_ARGV_FRAME. The call-site sp is expected in
// AvailableRegister2 and is stored in the frame; rbx and r12 are saved as
// callee-preserved scratch. The destructor unwinds the frame and emits Ret.
class OptimizedUnfoldArgVFrameFrameScope {
public:
    static constexpr int FRAME_SLOT_SIZE = 8;
    explicit OptimizedUnfoldArgVFrameFrameScope(ExtendedAssembler *assembler) : assembler_(assembler)
    {
        assembler_->Pushq(rbp);
        // construct frame
        assembler_->Pushq(static_cast<int64_t>(FrameType::OPTIMIZED_JS_FUNCTION_UNFOLD_ARGV_FRAME));
        assembler_->Pushq(assembler_->AvailableRegister2());
        // 2: skip callSiteSp and frameType
        assembler_->Leaq(Operand(rsp, 2 * FRAME_SLOT_SIZE), rbp);
        assembler_->Pushq(rbx);
        assembler_->Pushq(r12); // callee save
    }
    ~OptimizedUnfoldArgVFrameFrameScope()
    {
        // Rewind to the saved registers, restore them in reverse order, drop
        // the frameType/callSiteSp slots and return.
        assembler_->Movq(rbp, rsp);
        assembler_->Addq(-4 * FRAME_SLOT_SIZE, rsp); // -4: get r12 rbx
        assembler_->Popq(r12);
        assembler_->Popq(rbx);
        assembler_->Addq(2 * FRAME_SLOT_SIZE, rsp); // 2: skip frame type and sp
        assembler_->Popq(rbp);
        assembler_->Ret();
    }
    NO_COPY_SEMANTIC(OptimizedUnfoldArgVFrameFrameScope);
    NO_MOVE_SEMANTIC(OptimizedUnfoldArgVFrameFrameScope);
private:
    ExtendedAssembler *assembler_;
};
// Variant of OptimizedUnfoldArgVFrameFrameScope that additionally preserves
// r13 and r14 (needed by trampolines that also track an expected-args count,
// e.g. JSFastCallWithArgVAndPushUndefined). Destructor unwinds and emits Ret.
class OptimizedUnfoldArgVFrameFrame1Scope {
public:
    static constexpr int FRAME_SLOT_SIZE = 8;
    explicit OptimizedUnfoldArgVFrameFrame1Scope(ExtendedAssembler *assembler) : assembler_(assembler)
    {
        assembler_->Pushq(rbp);
        // construct frame
        assembler_->Pushq(static_cast<int64_t>(FrameType::OPTIMIZED_JS_FUNCTION_UNFOLD_ARGV_FRAME));
        assembler_->Pushq(assembler_->AvailableRegister2());
        // 2: skip callSiteSp and frameType
        assembler_->Leaq(Operand(rsp, 2 * FRAME_SLOT_SIZE), rbp);
        assembler_->Pushq(rbx);
        assembler_->Pushq(r12); // callee save
        assembler_->Pushq(r13);
        assembler_->Pushq(r14); // callee save
    }
    ~OptimizedUnfoldArgVFrameFrame1Scope()
    {
        // Rewind to the saved registers, restore them in reverse order, drop
        // the frameType/callSiteSp slots and return.
        assembler_->Movq(rbp, rsp);
        assembler_->Addq(-6 * FRAME_SLOT_SIZE, rsp); // -6: get r12 r13 r14 rbx
        assembler_->Popq(r14);
        assembler_->Popq(r13);
        assembler_->Popq(r12);
        assembler_->Popq(rbx);
        assembler_->Addq(2 * FRAME_SLOT_SIZE, rsp); // 2: skip frame type and sp
        assembler_->Popq(rbp);
        assembler_->Ret();
    }
    NO_COPY_SEMANTIC(OptimizedUnfoldArgVFrameFrame1Scope);
    NO_MOVE_SEMANTIC(OptimizedUnfoldArgVFrameFrame1Scope);
private:
    ExtendedAssembler *assembler_;
};
} // namespace panda::ecmascript::x64
#endif // ECMASCRIPT_COMPILER_ASSEMBLER_MODULE_X64_H

View File

@ -31,14 +31,12 @@
namespace panda::ecmascript::x64 {
#define __ assembler->
// * uint64_t JSFunctionEntry(uintptr_t glue, uint32_t actualNumArgs, const JSTaggedType argV[], uintptr_t prevFp,
// size_t callType)
// * uint64_t JSFunctionEntry(uintptr_t glue, uint32_t actualNumArgs, const JSTaggedType argV[], uintptr_t prevFp)
// * Arguments:
// %rdi - glue
// %rsi - actualNumArgs
// %rdx - argV
// %rcx - prevFp
// %r8 - callType
//
// * The JSFunctionEntry Frame's structure is illustrated as the following:
// +--------------------------+
@ -57,36 +55,16 @@ void OptimizedCall::JSFunctionEntry(ExtendedAssembler *assembler)
Register glueReg = rdi;
Register argv = rdx;
Register prevFpReg = rcx;
Register flag = r8;
Label lJSCallNewWithArgV;
Label lPopFrame;
__ PushCppCalleeSaveRegisters();
__ Pushq(glueReg); // caller save
// construct the frame
__ Pushq(rbp);
__ Pushq(static_cast<int32_t>(FrameType::OPTIMIZED_ENTRY_FRAME));
__ Pushq(prevFpReg);
// 2: skip prevFp and frameType
__ Leaq(Operand(rsp, 2 * FRAME_SLOT_SIZE), rbp);
__ Movq(flag, r12);
PushJSFunctionEntryFrame(assembler, prevFpReg);
__ Movq(argv, rbx);
__ Movq(Operand(rbx, 0), rdx);
__ Movq(Operand(rbx, FRAME_SLOT_SIZE), rcx);
__ Movq(Operand(rbx, DOUBLE_SLOT_SIZE), r8);
__ Addq(TRIPLE_SLOT_SIZE, rbx);
__ Movq(rbx, r9);
__ Cmpl(1, r12);
__ Je(&lJSCallNewWithArgV);
__ CallAssemblerStub(RTSTUB_ID(JSCallWithArgV), false);
__ Jmp(&lPopFrame);
__ Bind(&lJSCallNewWithArgV);
{
__ CallAssemblerStub(RTSTUB_ID(JSCallNewWithArgV), false);
}
__ Bind(&lPopFrame);
__ Popq(prevFpReg);
__ Addq(FRAME_SLOT_SIZE, rsp); // 8: frame type
__ Popq(rbp);
@ -96,40 +74,60 @@ void OptimizedCall::JSFunctionEntry(ExtendedAssembler *assembler)
__ Ret();
}
// * uint64_t OptimizedCallOptimized(uintptr_t glue, uint32_t expectedNumArgs, uint32_t actualNumArgs,
// uintptr_t codeAddr, uintptr_t argv)
// * Arguments wil CC calling convention:
// %rdi - glue
// %rsi - codeAddr
// %rdx - actualNumArgs
// %rcx - expectedNumArgs
// %r8 - argv
// * uint64_t OptimizedCallAndPushUndefined(uintptr_t glue, uint32_t argc, JSTaggedType calltarget, JSTaggedType new,
// JSTaggedType this, arg[0], arg[1], arg[2], ..., arg[N-1])
// * webkit_jscc calling convention call js function()
//
// * The OptimizedJSFunctionArgsConfig Frame's structure is illustrated as the following:
// +--------------------------+
// | arg[N-1] |
// +--------------------------+
// | . . . . |
// +--------------------------+
// | arg[0] |
// +--------------------------+
// | argC |
// sp ---> +--------------------------+ -----------------
// | | ^
// | prevFP | |
// |--------------------------| OptimizedJSFunctionArgsConfigFrame
// | frameType | |
// | | V
// +--------------------------+ -----------------
void OptimizedCall::OptimizedCallOptimized(ExtendedAssembler *assembler)
// * OptimizedJSFunctionFrame layout description as the following:
// +--------------------------+
// | arg[N-1] |
// +--------------------------+
// | ... |
// +--------------------------+
// | arg[1] |
// +--------------------------+
// | arg[0] |
// +--------------------------+
// | this |
// +--------------------------+
// | new-target |
// +--------------------------+
// | call-target |
// |--------------------------|
// | argc |
// |--------------------------| ---------------
// | returnAddr | ^
// sp ----> |--------------------------| |
// | callsiteFp | |
// |--------------------------| OptimizedJSFunctionFrame
// | frameType | |
// |--------------------------| |
// | call-target | v
// +--------------------------+ ---------------
void OptimizedCall::OptimizedCallAndPushUndefined(ExtendedAssembler *assembler)
{
__ BindAssemblerStub(RTSTUB_ID(OptimizedCallOptimized));
Register glueReg = rdi;
__ BindAssemblerStub(RTSTUB_ID(OptimizedCallAndPushUndefined));
Register jsFuncReg = rdi;
Register method = r9;
Register codeAddrReg = rsi;
__ Movq(Operand(rsp, DOUBLE_SLOT_SIZE), jsFuncReg); // sp + 16 get jsFunc
__ Mov(Operand(jsFuncReg, JSFunctionBase::METHOD_OFFSET), method); // get method
__ Mov(Operand(method, Method::CODE_ENTRY_OFFSET), codeAddrReg);
Register methodCallField = rcx;
__ Mov(Operand(method, Method::CALL_FIELD_OFFSET), methodCallField); // get call field
__ Shr(MethodLiteral::NumArgsBits::START_BIT, methodCallField);
__ Andl(((1LU << MethodLiteral::NumArgsBits::SIZE) - 1), methodCallField);
__ Addl(NUM_MANDATORY_JSFUNC_ARGS, methodCallField); // add mandatory argumentr
__ Movl(Operand(rsp, FRAME_SLOT_SIZE), rdx); // argc rdx
__ Movq(rsp, r8);
Register argvReg = r8;
auto funcSlotOffset = kungfu::ArgumentAccessor::GetExtraArgsNum() + 1; // 1: return addr
__ Addq(funcSlotOffset * FRAME_SLOT_SIZE, argvReg); // skip return addr and argc
Register expectedNumArgsReg = rcx;
Register actualNumArgsReg = rdx;
Register codeAddrReg = rsi;
Register argvReg = r8;
Label lCopyExtraAument1;
Label lCopyLoop1;
@ -161,7 +159,6 @@ void OptimizedCall::OptimizedCallOptimized(ExtendedAssembler *assembler)
__ Jne(&lCopyLoop1);
__ Pushq(actualNumArgsReg); // actual argc
__ Movq(glueReg, rax); // mov glue to rax
__ Callq(codeAddrReg); // then call jsFunction
__ Leaq(Operand(r14, Scale::Times8, 0), codeAddrReg);
__ Addq(codeAddrReg, rsp);
@ -305,7 +302,6 @@ void OptimizedCall::GenJSCall(ExtendedAssembler *assembler, bool isNew)
Label lJSFunctionCall;
Label lJSBoundFunction;
Label lJSProxy;
Label lCallOptimziedMethod;
Label lCallNativeMethod;
Label lCallNativeCpp;
Label lCallNativeBuiltinStub;
@ -352,8 +348,6 @@ void OptimizedCall::GenJSCall(ExtendedAssembler *assembler, bool isNew)
__ Btq(JSHClass::ClassConstructorBit::START_BIT, rax); // is CallConstructor
__ Jb(&lCallConstructor);
}
__ Btq(MethodLiteral::IsAotCodeBit::START_BIT, methodCallField); // is aot
__ Jb(&lCallOptimziedMethod);
__ Movq(rsp, argV);
auto argvSlotOffset = kungfu::ArgumentAccessor::GetExtraArgsNum() + 1; // 1: return addr
__ Addq(argvSlotOffset * FRAME_SLOT_SIZE, argV); // skip return addr and argc
@ -379,14 +373,6 @@ void OptimizedCall::GenJSCall(ExtendedAssembler *assembler, bool isNew)
}
}
__ Bind(&lCallOptimziedMethod);
Register codeAddrReg = rsi;
Register expectedNumArgsReg = rcx;
{
CallOptimziedMethodInternal(assembler, glueReg, jsFuncReg,
methodCallField, argc, codeAddrReg, expectedNumArgsReg);
}
__ Bind(&lCallNativeMethod);
{
Register nativePointer = rsi;
@ -566,32 +552,6 @@ void OptimizedCall::ThrowNonCallableInternal(ExtendedAssembler *assembler, Regis
__ Ret();
}
void OptimizedCall::CallOptimziedMethodInternal(ExtendedAssembler *assembler, Register glueReg, Register jsFuncReg,
Register methodCallField, Register argc,
Register codeAddrReg, Register expectedNumArgsReg)
{
Label lDirectCallCodeEntry;
Register method = rdx;
__ Mov(Operand(jsFuncReg, JSFunctionBase::METHOD_OFFSET), method); // get method
__ Mov(Operand(method, Method::CODE_ENTRY_OFFSET), codeAddrReg); // get codeAddress
__ Movq(argc, rdx); // argc -> rdx
__ Shr(MethodLiteral::NumArgsBits::START_BIT, methodCallField);
__ Andl(((1LU << MethodLiteral::NumArgsBits::SIZE) - 1), methodCallField);
__ Addl(NUM_MANDATORY_JSFUNC_ARGS, methodCallField); // add mandatory argumentr
__ Movq(rsp, r8);
Register argvReg = r8;
auto funcSlotOffset = kungfu::ArgumentAccessor::GetExtraArgsNum() + 1; // 1: return addr
__ Addq(funcSlotOffset * FRAME_SLOT_SIZE, argvReg); // skip return addr and argc
__ Cmpl(expectedNumArgsReg, rdx); // expectedNumArgs <= actualNumArgs
__ Jge(&lDirectCallCodeEntry);
__ CallAssemblerStub(RTSTUB_ID(OptimizedCallOptimized), true);
__ Bind(&lDirectCallCodeEntry);
{
__ Movq(glueReg, rax); // rax = glue
__ Jmp(codeAddrReg);
}
}
void OptimizedCall::JSBoundFunctionCallInternal(ExtendedAssembler *assembler, Register jsFuncReg, Label *jsCall)
{
Label lAlign16Bytes2;
@ -600,6 +560,10 @@ void OptimizedCall::JSBoundFunctionCallInternal(ExtendedAssembler *assembler, Re
Label lPushCallTarget;
Label lCopyBoundArgumentLoop;
Label lPopFrame2;
Label slowCall;
Label aotCall;
Label popArgs;
Label isJsFunc;
__ Pushq(rbp);
__ Pushq(static_cast<int32_t>(FrameType::OPTIMIZED_JS_FUNCTION_ARGS_CONFIG_FRAME));
__ Leaq(Operand(rsp, FRAME_SLOT_SIZE), rbp);
@ -653,14 +617,46 @@ void OptimizedCall::JSBoundFunctionCallInternal(ExtendedAssembler *assembler, Re
}
__ Bind(&lPushCallTarget);
{
__ Mov(Operand(jsFuncReg, JSBoundFunction::BOUND_THIS_OFFSET), rax); // thisObj
__ Pushq(rax);
__ Mov(Operand(jsFuncReg, JSBoundFunction::BOUND_THIS_OFFSET), r8); // thisObj
__ Pushq(r8);
__ Pushq(JSTaggedValue::VALUE_UNDEFINED); // newTarget
__ Mov(Operand(jsFuncReg, JSBoundFunction::BOUND_TARGET_OFFSET), rax); // callTarget
__ Pushq(rax);
__ Pushq(r10); // push actual arguments
}
JSCallCheck(assembler, rax, &slowCall, &slowCall, &isJsFunc); // jsfunc -> rsi hclassfiled -> rax
Register jsfunc = rsi;
Register methodCallField = rcx;
Register method = rdx;
__ Bind(&isJsFunc);
{
__ Btq(JSHClass::ClassConstructorBit::START_BIT, rax); // is CallConstructor
__ Jb(&slowCall);
__ Mov(Operand(rsi, JSFunctionBase::METHOD_OFFSET), method); // get method
__ Mov(Operand(method, Method::CALL_FIELD_OFFSET), methodCallField); // get call field
__ Btq(MethodLiteral::IsAotCodeBit::START_BIT, methodCallField); // is aot
__ Jb(&aotCall);
__ Bind(&aotCall);
{
// output: glue:rdi argc:rsi calltarget:rdx argv:rcx this:r8 newtarget:r9
__ Movq(jsfunc, rdx);
__ Movq(r10, rsi);
__ Leaq(Operand(rsp, 4 * FRAME_SLOT_SIZE), rcx); // 4: skip argc and func new this
__ Movq(JSTaggedValue::VALUE_UNDEFINED, r9);
__ Movq(kungfu::CommonStubCSigns::JsBoundCallInternal, r10);
__ Movq(Operand(rdi, r10, Scale::Times8, JSThread::GlueData::GetCOStubEntriesOffset(false)), rax);
__ Callq(rax); // call JSCall
__ Jmp(&popArgs);
}
}
__ Bind(&slowCall);
{
__ Movq(rdi, rax);
__ Callq(jsCall); // call JSCall
__ Jmp(&popArgs);
}
__ Bind(&popArgs);
{
__ Pop(r10);
__ Leaq(Operand(r10, Scale::Times8, 0), rcx); // 8: disp
__ Addq(rcx, rsp);
@ -668,7 +664,6 @@ void OptimizedCall::JSBoundFunctionCallInternal(ExtendedAssembler *assembler, Re
__ Jne(&lPopFrame2);
__ Addq(8, rsp); // 8: align byte
}
__ Bind(&lPopFrame2);
{
__ Pop(r10);
@ -942,19 +937,15 @@ void OptimizedCall::PopOptimizedUnfoldArgVFrame(ExtendedAssembler *assembler)
//
// * OptimizedJSFunctionFrame layout description as the following:
// +--------------------------+
// | arg[N-1] |
// +--------------------------+
// | . . . . . |
// +--------------------------+
// | arg[0] |
// +--------------------------+
// | this |
// +--------------------------+
// | new-target |
// +--------------------------+
// | call-target |
// | argn |
// |--------------------------|
// | argc |
// | argn - 1 |
// |--------------------------|
// | ..... |
// |--------------------------|
// | arg2 |
// |--------------------------|
// | arg1 |
// sp ----> |--------------------------| ---------------
// | returnAddr | ^
// |--------------------------| |
@ -965,7 +956,7 @@ void OptimizedCall::PopOptimizedUnfoldArgVFrame(ExtendedAssembler *assembler)
// | call-target | v
// +--------------------------+ ---------------
void OptimizedCall::GenJSCallWithArgV(ExtendedAssembler *assembler, bool isNew)
void OptimizedCall::GenJSCallWithArgV(ExtendedAssembler *assembler, bool needAddExpectedArgs)
{
Register sp(rsp);
Register glue(rdi);
@ -994,27 +985,50 @@ void OptimizedCall::GenJSCallWithArgV(ExtendedAssembler *assembler, bool isNew)
__ Addq(Immediate(NUM_MANDATORY_JSFUNC_ARGS), actualNumArgs);
__ Pushq(actualNumArgs);
__ Movq(glue, rax);
if (isNew) {
__ CallAssemblerStub(RTSTUB_ID(JSCallNew), false);
if (needAddExpectedArgs) {
__ CallAssemblerStub(RTSTUB_ID(OptimizedCallAndPushUndefined), false);
} else {
__ CallAssemblerStub(RTSTUB_ID(JSCall), false);
__ CallAssemblerStub(RTSTUB_ID(CallOptimized), false);
}
__ Mov(Operand(sp, 0), actualNumArgs);
PopJSFunctionArgs(assembler, actualNumArgs);
PopOptimizedUnfoldArgVFrame(assembler);
__ Ret();
}
// * uint64_t JSCallWithArgVAndPushUndefined(uintptr_t glue, uint32_t argc, JSTaggedType calltarget,
// JSTaggedType new, JSTaggedType this, argV)
// * cc calling convention call js function()
// * arguments:
// %rdi - glue
// %rsi - argc
// %rdx - call-target
// %rcx - new-target
// %r8 - this
// %r9 - argv
// Binds the JSCallWithArgVAndPushUndefined stub: unfold-argv path that routes
// through OptimizedCallAndPushUndefined (needAddExpectedArgs == true) so that
// arguments missing versus the callee's declared count are padded with undefined.
void OptimizedCall::JSCallWithArgVAndPushUndefined(ExtendedAssembler *assembler)
{
    __ BindAssemblerStub(RTSTUB_ID(JSCallWithArgVAndPushUndefined));
    GenJSCallWithArgV(assembler, true);
}
// Binds the JSCallWithArgV stub: unfold-argv path that calls the target via
// CallOptimized directly (needAddExpectedArgs == false), i.e. no undefined padding.
void OptimizedCall::JSCallWithArgV(ExtendedAssembler *assembler)
{
    __ BindAssemblerStub(RTSTUB_ID(JSCallWithArgV));
    GenJSCallWithArgV(assembler, false);
}
void OptimizedCall::JSCallNewWithArgV(ExtendedAssembler *assembler)
// Tail-jumps to the target method's AOT code entry.
// On entry the stack follows webkit_jscc: [returnAddr][argc][func][...], so the
// function object sits at sp + 16; its Method's code entry is resolved and
// jumped to, leaving the caller's return address in place for the callee.
void OptimizedCall::CallOptimized(ExtendedAssembler *assembler)
{
    __ BindAssemblerStub(RTSTUB_ID(CallOptimized));
    Register jsFuncReg = rdi;
    Register method = r9;
    Register codeAddrReg = rsi;
    __ Movq(Operand(rsp, DOUBLE_SLOT_SIZE), jsFuncReg); // sp + 16 get jsFunc
    __ Mov(Operand(jsFuncReg, JSFunctionBase::METHOD_OFFSET), method); // get method
    __ Mov(Operand(method, Method::CODE_ENTRY_OFFSET), codeAddrReg);
    // Tail call: the AOT code returns directly to this stub's caller.
    __ Jmp(codeAddrReg);
}
// Input: %rdi - glue

View File

@ -0,0 +1,384 @@
/*
* Copyright (c) 2023 Huawei Device Co., Ltd.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "ecmascript/compiler/trampoline/x64/common_call.h"
#include "ecmascript/compiler/assembler/assembler.h"
#include "ecmascript/compiler/common_stubs.h"
#include "ecmascript/compiler/rt_call_signature.h"
#include "ecmascript/compiler/argument_accessor.h"
#include "ecmascript/deoptimizer/deoptimizer.h"
#include "ecmascript/ecma_runtime_call_info.h"
#include "ecmascript/frames.h"
#include "ecmascript/js_function.h"
#include "ecmascript/js_thread.h"
#include "ecmascript/message_string.h"
#include "ecmascript/method.h"
#include "ecmascript/runtime_call_id.h"
namespace panda::ecmascript::x64 {
#define __ assembler->
// * uint64_t OptimizedFastCallEntry(uintptr_t glue, uint32_t actualNumArgs, const JSTaggedType argV[],
// uintptr_t prevFp)
// * Arguments:
// %rdi - glue
// %rsi - actualNumArgs
// %rdx - argV
// %rcx - prevFp
//
// * Entry trampoline from C++ into fast-call AOT code. argV[0] is the callee
// function and argV[1] is the this value; the remaining slots are the real
// arguments. Registers are reshuffled into the fast-call convention
// (%rdi glue, %rsi func, %rdx this, %rcx actualNumArgs, %r8 argv) before
// dispatching to JSFastCallWithArgV.
void OptimizedFastCall::OptimizedFastCallEntry(ExtendedAssembler *assembler)
{
    __ BindAssemblerStub(RTSTUB_ID(OptimizedFastCallEntry));
    Register glueReg = rdi;
    Register argv = rdx;
    Register prevFpReg = rcx;
    OptimizedCall::PushJSFunctionEntryFrame(assembler, prevFpReg);
    __ Movq(argv, r8);
    __ Movq(rsi, rcx); // actualNumArgs -> %rcx, before %rsi is overwritten below
    __ Movq(Operand(r8, 0), rsi); // func
    __ Movq(Operand(r8, FRAME_SLOT_SIZE), rdx); // thisobj
    __ Addq(DOUBLE_SLOT_SIZE, r8); // skip func and this to reach the first real arg
    __ CallAssemblerStub(RTSTUB_ID(JSFastCallWithArgV), false);
    // Unwind the entry frame and publish prevFp as the leave frame.
    __ Popq(prevFpReg);
    __ Addq(FRAME_SLOT_SIZE, rsp); // 8: frame type
    __ Popq(rbp);
    __ Popq(glueReg); // caller restore
    __ PopCppCalleeSaveRegisters(); // callee restore
    __ Movq(prevFpReg, Operand(glueReg, JSThread::GlueData::GetLeaveFrameOffset(false)));
    __ Ret();
}
// * uint64_t OptimizedFastCallAndPushUndefined(uintptr_t glue, uint32_t actualNumArgs, JSTaggedType func,
// JSTaggedType newTarget, JSTaggedType thisObj, JSTaggedType arg0, ...)
// * Arguments with CC calling convention:
// %rdi - glue
// %rsi - actualNumArgs
// %rdx - func
// %rcx - new target
// %r8 - this
// %r9 - arg0
// * Called when the callee declares more parameters than were supplied: the
// supplied arguments are moved into the fast-call registers/stack and the
// missing tail is filled with undefined before calling into the AOT code.
//
// * The OptimizedJSFunctionArgsConfig Frame's structure is illustrated as the following:
// +--------------------------+
// | arg[N-1] |
// +--------------------------+
// | . . . . |
// +--------------------------+
// | arg[0] |
// +--------------------------+
// | argC |
// sp ---> +--------------------------+ -----------------
// | | ^
// | prevFP | |
// |--------------------------| OptimizedJSFunctionArgsConfigFrame
// | frameType | |
// | | V
// +--------------------------+ -----------------
void OptimizedFastCall::OptimizedFastCallAndPushUndefined(ExtendedAssembler *assembler)
{
    __ BindAssemblerStub(RTSTUB_ID(OptimizedFastCallAndPushUndefined));
    Register actualNumArgsReg = rsi;
    Register jsFuncReg = rdx;
    Register thisObj = r8;
    Register arg0 = r9;
    Label lCopyExtraAument1;
    Label lCopyExtraUndefineToSp;
    Label lCopyLoop1;
    Label call;
    Label arg4;
    Label argc;
    Label checkExpectedArgs;
    JsFunctionArgsConfigFrameScope scope(assembler); // push frametype and callee save
    // Reshuffle into the fast-call register layout expected by the callee.
    __ Movq(actualNumArgsReg, r13);
    actualNumArgsReg = r13;
    __ Movq(rdx, rsi); // move func into %rsi
    jsFuncReg = rsi;
    __ Movq(thisObj, rdx); // move this into %rdx
    arg0 = rcx;
    Register method = r14;
    Register methodCallField = rbx;
    Register codeAddrReg = rax;
    Register argvReg = r12;
    __ Leaq(Operand(rsp, 8 * FRAME_SLOT_SIZE), argvReg); // 8: return addr + 7 slots pushed by the frame scope
    __ Mov(Operand(jsFuncReg, JSFunctionBase::METHOD_OFFSET), method); // get method
    __ Mov(Operand(method, Method::CODE_ENTRY_OFFSET), codeAddrReg); // get codeAddress
    __ Mov(Operand(method, Method::CALL_FIELD_OFFSET), methodCallField); // get call field
    // Decode the declared arg count and add func/new-target/this.
    __ Shr(MethodLiteral::NumArgsBits::START_BIT, methodCallField);
    __ Andl(((1LU << MethodLiteral::NumArgsBits::SIZE) - 1), methodCallField);
    __ Addl(NUM_MANDATORY_JSFUNC_ARGS, methodCallField); // add mandatory arguments
    Register expectedNumArgsReg = rbx;
    Label arg5;
    Label arg6;
    // Dispatch on the actual count: up to three real arguments travel in
    // rcx/r8/r9; anything beyond that is read from the caller's stack argv.
    __ Cmp(Immediate(3), actualNumArgsReg); // 3: func new this
    __ Jne(&arg4);
    __ Movq(JSTaggedValue::VALUE_UNDEFINED, rcx);
    __ Movq(JSTaggedValue::VALUE_UNDEFINED, r8);
    __ Movq(JSTaggedValue::VALUE_UNDEFINED, r9);
    __ Subq(3, expectedNumArgsReg); // 3: skip 3 register
    __ Jmp(&checkExpectedArgs);
    __ Bind(&arg4);
    {
        __ Movq(r9, rcx); // arg0 to rcx
        __ Cmp(Immediate(4), actualNumArgsReg); // 4: func new this arg0
        __ Jne(&arg5);
        __ Movq(JSTaggedValue::VALUE_UNDEFINED, r8);
        __ Movq(JSTaggedValue::VALUE_UNDEFINED, r9);
        __ Subq(3, expectedNumArgsReg); // 3: skip 3 register
        __ Jmp(&checkExpectedArgs);
    }
    __ Bind(&arg5);
    {
        __ Movq(Operand(argvReg, 0), r8);
        __ Addq(FRAME_SLOT_SIZE, argvReg);
        __ Cmp(Immediate(5), actualNumArgsReg); // 5: 5 args
        __ Jne(&arg6);
        __ Movq(JSTaggedValue::VALUE_UNDEFINED, r9);
        __ Subq(3, expectedNumArgsReg); // 3: skip 3 register
        __ Jmp(&checkExpectedArgs);
    }
    __ Bind(&arg6);
    {
        __ Movq(Operand(argvReg, 0), r9);
        __ Addq(FRAME_SLOT_SIZE, argvReg);
        __ Cmp(Immediate(6), actualNumArgsReg); // 6: 6 args
        __ Jne(&argc);
        __ Subq(3, expectedNumArgsReg); // 3: skip above 3 args
        __ Jmp(&checkExpectedArgs);
    }
    __ Bind(&argc); // actualNumArgsReg >= 7
    {
        __ Subq(6, actualNumArgsReg); // 6: skip above 6 args
        __ Subq(6, expectedNumArgsReg); // 6: skip above 6 args
        // Push undefined until the remaining expected count reaches the
        // remaining actual count (with one alignment slot if expected is odd),
        // then copy the real stack arguments in reverse order.
        __ Testb(1, expectedNumArgsReg);
        __ Je(&lCopyExtraAument1);
        __ Pushq(0);
        __ Bind(&lCopyExtraAument1); // copy undefined value to stack
        __ Pushq(JSTaggedValue::VALUE_UNDEFINED);
        __ Subq(1, expectedNumArgsReg);
        __ Cmpq(actualNumArgsReg, expectedNumArgsReg);
        __ Ja(&lCopyExtraAument1);
        __ Bind(&lCopyLoop1);
        // r13 (spilled actual count) is reusable as scratch here: the copy
        // loop is driven by expectedNumArgsReg alone.
        __ Movq(Operand(argvReg, expectedNumArgsReg, Scale::Times8, -FRAME_SLOT_SIZE), r13); // -8: stack index
        __ Pushq(r13);
        __ Subq(1, expectedNumArgsReg);
        __ Jne(&lCopyLoop1);
        __ Jmp(&call);
    }
    __ Bind(&checkExpectedArgs);
    {
        // All actual arguments fit in registers; only undefined padding (if
        // any) must be pushed for the surplus expected arguments.
        __ Cmp(Immediate(3), expectedNumArgsReg); // 3: expectedNumArgsReg <= 3 jump
        __ Jbe(&call);
        __ Subq(3, expectedNumArgsReg); // 3: skip func new this
        __ Testb(1, expectedNumArgsReg);
        __ Je(&lCopyExtraUndefineToSp);
        __ Pushq(0); // expectedNumArgsReg is odd need align
        __ Bind(&lCopyExtraUndefineToSp); // copy undefined value to stack
        __ Pushq(JSTaggedValue::VALUE_UNDEFINED);
        __ Subq(1, expectedNumArgsReg);
        __ Cmp(0, expectedNumArgsReg);
        __ Ja(&lCopyExtraUndefineToSp);
        __ Jmp(&call);
    }
    __ Bind(&call);
    // The frame scope destructor emits the epilogue and Ret after this call.
    __ Callq(codeAddrReg); // then call jsFunction
}
// * uint64_t JSFastCallWithArgV(uintptr_t glue, JSTaggedType func, JSTaggedType thisValue,
// uint32_t actualNumArgs, const JSTaggedType argV[])
// * Arguments with CC calling convention:
// %rdi - glue
// %rsi - func
// %rdx - this
// %rcx - actualNumArgs
// %r8 - argv
// * Unfolds argv into the fast-call convention: the first three arguments go
// into %rcx/%r8/%r9, the remainder is pushed on the stack (16-byte aligned),
// then the method's code entry is invoked.
void OptimizedFastCall::JSFastCallWithArgV(ExtendedAssembler *assembler)
{
    __ BindAssemblerStub(RTSTUB_ID(JSFastCallWithArgV));
    Register sp(rsp);
    Register callsiteSp = __ AvailableRegister2();
    Label align16Bytes;
    Label call;
    __ Movq(sp, callsiteSp);
    __ Addq(Immediate(FRAME_SLOT_SIZE), callsiteSp); // 8 : 8 means skip pc to get last callsitesp
    OptimizedUnfoldArgVFrameFrameScope scope(assembler); // push frametype and callee save
    // Stash count and argv in callee-saved registers so they survive below.
    __ Movq(rcx, r12);
    __ Movq(r8, rbx);
    Register actualNumArgs(r12);
    Register argV(rbx);
    __ Cmp(0, actualNumArgs);
    __ Jz(&call);
    __ Movq(Operand(argV, 0), rcx); // first arg
    __ Addq(FRAME_SLOT_SIZE, argV);
    __ Addq(-1, actualNumArgs);
    __ Cmp(0, actualNumArgs);
    __ Jz(&call);
    __ Movq(Operand(argV, 0), r8); // second arg
    __ Addq(FRAME_SLOT_SIZE, argV);
    __ Addq(-1, actualNumArgs);
    __ Cmp(0, actualNumArgs);
    __ Jz(&call);
    __ Movq(Operand(argV, 0), r9); // third arg
    __ Addq(FRAME_SLOT_SIZE, argV);
    __ Addq(-1, actualNumArgs);
    __ Cmp(0, actualNumArgs);
    __ Jz(&call);
    // More than three args: push the rest, padding one slot first when the
    // remaining count is odd to keep 16-byte stack alignment.
    __ Testb(1, actualNumArgs);
    __ Je(&align16Bytes);
    __ PushAlignBytes();
    __ Bind(&align16Bytes);
    __ Mov(actualNumArgs, rax);
    CopyArgumentWithArgV(assembler, rax, argV);
    __ Bind(&call);
    Register method = r12;
    Register jsFuncReg = rsi;
    __ Mov(Operand(jsFuncReg, JSFunctionBase::METHOD_OFFSET), method); // get method
    __ Mov(Operand(method, Method::CODE_ENTRY_OFFSET), rbx); // get codeAddress
    // The frame scope destructor emits the epilogue and Ret after this call.
    __ Callq(rbx);
}
// * uint64_t JSFastCallWithArgVAndPushUndefined(uintptr_t glue, JSTaggedType func, JSTaggedType thisValue,
// uint32_t actualNumArgs, const JSTaggedType argV[], uint32_t expectedNumArgs)
// * Arguments with CC calling convention:
// %rdi - glue
// %rsi - func
// %rdx - this
// %rcx - actualNumArgs
// %r8 - argv
// %r9 - expectedNumArgs
// * Like JSFastCallWithArgV, but additionally pushes undefined for the
// arguments the callee expects beyond those actually supplied.
void OptimizedFastCall::JSFastCallWithArgVAndPushUndefined(ExtendedAssembler *assembler)
{
    __ BindAssemblerStub(RTSTUB_ID(JSFastCallWithArgVAndPushUndefined));
    Register sp(rsp);
    Register callsiteSp = __ AvailableRegister2();
    Label call;
    Label lCopyExtraAument1;
    Label lCopyExtraUndefineToSp;
    Label lCopyLoop1;
    Label arg1;
    Label arg2;
    Label arg3;
    Label argc;
    Label checkExpectedArgs;
    __ Movq(sp, callsiteSp);
    __ Addq(Immediate(FRAME_SLOT_SIZE), callsiteSp); // 8 : 8 means skip pc to get last callsitesp
    OptimizedUnfoldArgVFrameFrame1Scope scope(assembler);
    // Stash count/argv/expected in callee-saved registers.
    __ Movq(rcx, r12);
    __ Movq(r8, rbx);
    __ Movq(r9, r14);
    Register actualNumArgsReg(r12);
    Register expectedNumArgsReg(r14);
    Register argV(rbx);
    // Fill rcx/r8/r9 with the first three real arguments, or undefined.
    __ Cmp(0, actualNumArgsReg);
    __ Jne(&arg1);
    __ Movq(JSTaggedValue::VALUE_UNDEFINED, rcx);
    __ Movq(JSTaggedValue::VALUE_UNDEFINED, r8);
    __ Movq(JSTaggedValue::VALUE_UNDEFINED, r9);
    __ Jmp(&checkExpectedArgs);
    __ Bind(&arg1);
    {
        __ Movq(Operand(argV, 0), rcx); // first arg
        __ Addq(FRAME_SLOT_SIZE, argV);
        __ Cmp(1, actualNumArgsReg);
        __ Jne(&arg2);
        __ Movq(JSTaggedValue::VALUE_UNDEFINED, r8);
        __ Movq(JSTaggedValue::VALUE_UNDEFINED, r9);
        __ Jmp(&checkExpectedArgs);
    }
    __ Bind(&arg2);
    {
        __ Movq(Operand(argV, 0), r8); // second arg
        __ Addq(FRAME_SLOT_SIZE, argV);
        __ Cmp(2, actualNumArgsReg); // 2: 2 args
        __ Jne(&arg3);
        __ Movq(JSTaggedValue::VALUE_UNDEFINED, r9);
        __ Jmp(&checkExpectedArgs);
    }
    __ Bind(&arg3);
    {
        __ Movq(Operand(argV, 0), r9); // third arg
        __ Addq(FRAME_SLOT_SIZE, argV);
        __ Cmp(3, actualNumArgsReg); // 3: 3 args
        __ Jne(&argc);
        __ Jmp(&checkExpectedArgs);
    }
    __ Bind(&argc); // actualNumArgsReg >= 4
    {
        __ Subq(3, actualNumArgsReg); // 3: skip above 3 args
        __ Subq(3, expectedNumArgsReg); // 3: skip above 3 args
        // Push undefined down to the remaining actual count (with one
        // alignment slot if expected is odd), then copy the remaining real
        // arguments in reverse order.
        __ Testb(1, expectedNumArgsReg);
        __ Je(&lCopyExtraAument1);
        __ Pushq(0);
        __ Bind(&lCopyExtraAument1); // copy undefined value to stack
        __ Pushq(JSTaggedValue::VALUE_UNDEFINED);
        __ Subq(1, expectedNumArgsReg);
        __ Cmpq(actualNumArgsReg, expectedNumArgsReg);
        __ Ja(&lCopyExtraAument1);
        __ Bind(&lCopyLoop1);
        __ Movq(Operand(argV, expectedNumArgsReg, Scale::Times8, -FRAME_SLOT_SIZE), r13); // -8: stack index
        __ Pushq(r13);
        __ Subq(1, expectedNumArgsReg);
        __ Jne(&lCopyLoop1);
        __ Jmp(&call);
    }
    __ Bind(&checkExpectedArgs);
    {
        // At most three actual args were supplied; push undefined for any
        // surplus expected arguments.
        __ Cmp(Immediate(3), expectedNumArgsReg); // 3: expectedNumArgsReg <= 3 jump
        __ Jbe(&call);
        __ Subq(3, expectedNumArgsReg); // 3: skip func new this
        __ Testb(1, expectedNumArgsReg);
        __ Je(&lCopyExtraUndefineToSp);
        __ Pushq(0); // expectedNumArgsReg is odd need align
        __ Bind(&lCopyExtraUndefineToSp); // copy undefined value to stack
        __ Pushq(JSTaggedValue::VALUE_UNDEFINED);
        __ Subq(1, expectedNumArgsReg);
        __ Cmp(0, expectedNumArgsReg);
        __ Ja(&lCopyExtraUndefineToSp);
        __ Jmp(&call);
    }
    __ Bind(&call);
    Register method = r12;
    Register jsFuncReg = rsi;
    __ Mov(Operand(jsFuncReg, JSFunctionBase::METHOD_OFFSET), method); // get method
    __ Mov(Operand(method, Method::CODE_ENTRY_OFFSET), rbx); // get codeAddress
    // The frame scope destructor emits the epilogue and Ret after this call.
    __ Callq(rbx);
}
#undef __
} // namespace panda::ecmascript::x64

View File

@ -93,7 +93,7 @@ void TSInlineLowering::TryInline(GateRef gate, bool isCallThis)
CircuitRootScope scope(circuit_);
InlineFuncCheck(gate);
InlineCall(methodInfo, methodPcInfo, inlinedMethod, gate);
ReplaceCallInput(gate, isCallThis, glue);
ReplaceCallInput(gate, isCallThis, glue, inlinedMethod);
inlinedCall_++;
}
}
@ -196,7 +196,7 @@ bool TSInlineLowering::CheckParameter(GateRef gate, bool isCallThis, MethodLiter
return declaredNumArgs == (numIns - fixedInputsNum);
}
void TSInlineLowering::ReplaceCallInput(GateRef gate, bool isCallThis, GateRef glue)
void TSInlineLowering::ReplaceCallInput(GateRef gate, bool isCallThis, GateRef glue, MethodLiteral *method)
{
std::vector<GateRef> vec;
size_t numIns = acc_.GetNumValueIn(gate);
@ -214,15 +214,19 @@ void TSInlineLowering::ReplaceCallInput(GateRef gate, bool isCallThis, GateRef g
// -1: callTarget
size_t actualArgc = numIns + NUM_MANDATORY_JSFUNC_ARGS - fixedInputsNum;
vec.emplace_back(glue); // glue
vec.emplace_back(builder_.Int64(actualArgc)); // argc
if (!method->IsFastCall()) {
vec.emplace_back(builder_.Int64(actualArgc)); // argc
}
vec.emplace_back(callTarget);
vec.emplace_back(builder_.Undefined()); // newTarget
if (!method->IsFastCall()) {
vec.emplace_back(builder_.Undefined()); // newTarget
}
vec.emplace_back(thisObj);
// -1: call Target
for (size_t i = fixedInputsNum - 1; i < numIns - 1; i++) {
vec.emplace_back(acc_.GetValueIn(gate, i));
}
LowerToInlineCall(gate, vec);
LowerToInlineCall(gate, vec, method);
}
GateRef TSInlineLowering::MergeAllReturn(const std::vector<GateRef> &returnVector, GateRef &state, GateRef &depend)
@ -325,7 +329,7 @@ void TSInlineLowering::ReplaceHirAndDeleteState(GateRef gate, GateRef state, Gat
acc_.DeleteGate(gate);
}
void TSInlineLowering::LowerToInlineCall(GateRef callGate, const std::vector<GateRef> &args)
void TSInlineLowering::LowerToInlineCall(GateRef callGate, const std::vector<GateRef> &args, MethodLiteral* method)
{
// replace in value/args
ArgumentAccessor argAcc(circuit_);
@ -337,7 +341,12 @@ void TSInlineLowering::LowerToInlineCall(GateRef callGate, const std::vector<Gat
}
// replace in depend and state
GateRef glue = args.at(static_cast<size_t>(CommonArgIdx::GLUE));
GateRef inlineFunc = args.at(static_cast<size_t>(CommonArgIdx::FUNC));
GateRef inlineFunc;
if (method->IsFastCall()) {
inlineFunc = args.at(static_cast<size_t>(FastCallArgIdx::FUNC));
} else {
inlineFunc = args.at(static_cast<size_t>(CommonArgIdx::FUNC));
}
GateRef callerFunc = argAcc.GetFrameArgsIn(callGate, FrameArgIdx::FUNC);
ReplaceEntryGate(callGate, callerFunc, inlineFunc, glue);
// replace use gate

View File

@ -79,7 +79,7 @@ private:
bool FilterInlinedMethod(MethodLiteral* method, std::vector<const uint8_t*> pcOffsets);
bool FilterCallInTryCatch(GateRef gate);
void InlineCall(MethodInfo &methodInfo, MethodPcInfo &methodPCInfo, MethodLiteral* method, GateRef gate);
void ReplaceCallInput(GateRef gate, bool isCallThis, GateRef glue);
void ReplaceCallInput(GateRef gate, bool isCallThis, GateRef glue, MethodLiteral *method);
void ReplaceEntryGate(GateRef callGate, GateRef callerFunc, GateRef inlineFunc, GateRef glue);
void ReplaceReturnGate(GateRef callGate);
@ -89,7 +89,7 @@ private:
GateRef MergeAllReturn(const std::vector<GateRef> &returnVector, GateRef &state, GateRef &depend);
bool CheckParameter(GateRef gate, bool isCallThis, MethodLiteral* method);
void LowerToInlineCall(GateRef gate, const std::vector<GateRef> &args);
void LowerToInlineCall(GateRef gate, const std::vector<GateRef> &args, MethodLiteral* method);
void RemoveRoot();
void BuildFrameStateChain(GateRef gate, BytecodeCircuitBuilder &builder);
GateRef TraceInlineFunction(GateRef glue, GateRef depend, std::vector<GateRef> &args, GateRef callGate);

View File

@ -785,12 +785,11 @@ void TSTypeLowering::LowerTypedNewObjRange(GateRef gate)
// call constructor
size_t range = acc_.GetNumValueIn(gate);
GateRef actualArgc = builder_.Int32(BytecodeCallArgc::ComputeCallArgc(range, EcmaOpcode::NEWOBJRANGE_IMM8_IMM8_V8));
GateRef actualArgc = builder_.Int64(BytecodeCallArgc::ComputeCallArgc(range, EcmaOpcode::NEWOBJRANGE_IMM8_IMM8_V8));
std::vector<GateRef> args { glue_, actualArgc, ctor, ctor, thisObj };
for (size_t i = 1; i < range; ++i) { // 1:skip ctor
args.emplace_back(acc_.GetValueIn(gate, i));
}
GateRef constructGate = builder_.Construct(gate, args);
acc_.ReplaceGate(gate, builder_.GetState(), builder_.GetDepend(), constructGate);
}
@ -815,7 +814,7 @@ void TSTypeLowering::LowerTypedSuperCall(GateRef gate)
// call constructor
size_t range = acc_.GetNumValueIn(gate);
GateRef actualArgc = builder_.Int32(range + 3); // 3: ctor, newTaget, this
GateRef actualArgc = builder_.Int64(range + 3); // 3: ctor, newTaget, this
std::vector<GateRef> args { glue_, actualArgc, superCtor, newTarget, thisObj };
for (size_t i = 0; i < range; ++i) {
args.emplace_back(acc_.GetValueIn(gate, i));
@ -844,6 +843,38 @@ BuiltinsStubCSigns::ID TSTypeLowering::GetBuiltinId(GateRef func)
return id;
}
// Guards the call target with a type check and lowers the call gate to either the
// fast-call or the standard typed-call convention, based on TS type information.
// - gate:         the call gate being lowered (replaced in place on success).
// - func:         the callee value gate.
// - funcGt:       the callee's global TS type ref, queried for fast-call eligibility.
// - funcType:     the callee's gate type, embedded into the emitted target check.
// - args:         argument vector for the standard convention (includes argc/newTarget).
// - argsFastCall: argument vector for the fast-call convention (shorter list built by callers).
// May return without lowering when the method's offset/fast-call flag is invalid or the
// method index cannot be resolved; the gate is then left untouched.
void TSTypeLowering::CheckCallTargetAndLowerCall(GateRef gate, GateRef func, GlobalTSTypeRef funcGt,
    GateType funcType, const std::vector<GateRef> &args, const std::vector<GateRef> &argsFastCall)
{
    if (IsLoadVtable(func)) {
        // Callee came from a vtable load ('this' call): only a this-target check is needed.
        if (tsManager_->CanFastCall(funcGt)) {
            builder_.JSFastCallThisTargetTypeCheck(funcType, func);
            GateRef result = builder_.TypedFastCall(gate, argsFastCall);
            acc_.ReplaceGate(gate, builder_.GetState(), builder_.GetDepend(), result);
        } else {
            builder_.JSCallThisTargetTypeCheck(funcType, func);
            GateRef result = builder_.TypedCall(gate, args);
            acc_.ReplaceGate(gate, builder_.GetState(), builder_.GetDepend(), result);
        }
    } else {
        // Plain call: the target check also compares against the constpool method,
        // so the method index and flags must be resolvable; bail out otherwise.
        int methodIndex = tsManager_->GetMethodIndex(funcGt);
        if (!tsManager_->MethodOffsetIsVaild(funcGt) || !tsManager_->FastCallFlagIsVaild(funcGt)
            || methodIndex == -1) {
            return;
        }
        if (tsManager_->CanFastCall(funcGt)) {
            builder_.JSFastCallTargetTypeCheck(funcType, func, builder_.IntPtr(methodIndex));
            GateRef result = builder_.TypedFastCall(gate, argsFastCall);
            acc_.ReplaceGate(gate, builder_.GetState(), builder_.GetDepend(), result);
        } else {
            builder_.JSCallTargetTypeCheck(funcType, func, builder_.IntPtr(methodIndex));
            GateRef result = builder_.TypedCall(gate, args);
            acc_.ReplaceGate(gate, builder_.GetState(), builder_.GetDepend(), result);
        }
    }
}
void TSTypeLowering::LowerTypedCallArg0(GateRef gate)
{
GateRef func = acc_.GetValueIn(gate, 0);
@ -860,23 +891,9 @@ void TSTypeLowering::LowerTypedCallArg0(GateRef gate)
EcmaOpcode::CALLARG0_IMM8));
GateRef newTarget = builder_.Undefined();
GateRef thisObj = builder_.Undefined();
if (IsLoadVtable(func)) {
builder_.JSCallThisTargetTypeCheck(funcType, func);
std::vector<GateRef> args { glue_, actualArgc, func, newTarget, thisObj };
GateRef result = builder_.TypedAotCall(gate, args);
acc_.ReplaceGate(gate, builder_.GetState(), builder_.GetDepend(), result);
} else {
int methodIndex = tsManager_->GetMethodIndex(funcGt);
if (methodIndex == -1) {
return;
}
builder_.JSCallTargetTypeCheck(funcType, func, builder_.IntPtr(methodIndex));
std::vector<GateRef> args { glue_, actualArgc, func, newTarget, thisObj };
GateRef result = builder_.TypedAotCall(gate, args);
acc_.ReplaceGate(gate, builder_.GetState(), builder_.GetDepend(), result);
}
std::vector<GateRef> argsFastCall { glue_, func, thisObj };
std::vector<GateRef> args { glue_, actualArgc, func, newTarget, thisObj };
CheckCallTargetAndLowerCall(gate, func, funcGt, funcType, args, argsFastCall);
}
void TSTypeLowering::LowerTypedCallArg1(GateRef gate)
@ -902,21 +919,9 @@ void TSTypeLowering::LowerTypedCallArg1(GateRef gate)
EcmaOpcode::CALLARG1_IMM8_V8));
GateRef newTarget = builder_.Undefined();
GateRef thisObj = builder_.Undefined();
if (IsLoadVtable(func)) {
builder_.JSCallThisTargetTypeCheck(funcType, func);
std::vector<GateRef> args { glue_, actualArgc, func, newTarget, thisObj, a0Value };
GateRef result = builder_.TypedAotCall(gate, args);
acc_.ReplaceGate(gate, builder_.GetState(), builder_.GetDepend(), result);
} else {
int methodIndex = tsManager_->GetMethodIndex(funcGt);
if (methodIndex == -1) {
return;
}
builder_.JSCallTargetTypeCheck(funcType, func, builder_.IntPtr(methodIndex));
std::vector<GateRef> args { glue_, actualArgc, func, newTarget, thisObj, a0Value };
GateRef result = builder_.TypedAotCall(gate, args);
acc_.ReplaceGate(gate, builder_.GetState(), builder_.GetDepend(), result);
}
std::vector<GateRef> argsFastCall { glue_, func, thisObj, a0Value };
std::vector<GateRef> args { glue_, actualArgc, func, newTarget, thisObj, a0Value };
CheckCallTargetAndLowerCall(gate, func, funcGt, funcType, args, argsFastCall);
}
}
@ -938,21 +943,9 @@ void TSTypeLowering::LowerTypedCallArg2(GateRef gate)
GateRef thisObj = builder_.Undefined();
GateRef a0 = acc_.GetValueIn(gate, 0);
GateRef a1 = acc_.GetValueIn(gate, 1); // 1:first parameter
if (IsLoadVtable(func)) {
builder_.JSCallThisTargetTypeCheck(funcType, func);
std::vector<GateRef> args { glue_, actualArgc, func, newTarget, thisObj, a0, a1 };
GateRef result = builder_.TypedAotCall(gate, args);
acc_.ReplaceGate(gate, builder_.GetState(), builder_.GetDepend(), result);
} else {
int methodIndex = tsManager_->GetMethodIndex(funcGt);
if (methodIndex == -1) {
return;
}
builder_.JSCallTargetTypeCheck(funcType, func, builder_.IntPtr(methodIndex));
std::vector<GateRef> args { glue_, actualArgc, func, newTarget, thisObj, a0, a1 };
GateRef result = builder_.TypedAotCall(gate, args);
acc_.ReplaceGate(gate, builder_.GetState(), builder_.GetDepend(), result);
}
std::vector<GateRef> argsFastCall { glue_, func, thisObj, a0, a1 };
std::vector<GateRef> args { glue_, actualArgc, func, newTarget, thisObj, a0, a1 };
CheckCallTargetAndLowerCall(gate, func, funcGt, funcType, args, argsFastCall);
}
void TSTypeLowering::LowerTypedCallArg3(GateRef gate)
@ -974,26 +967,15 @@ void TSTypeLowering::LowerTypedCallArg3(GateRef gate)
GateRef a0 = acc_.GetValueIn(gate, 0);
GateRef a1 = acc_.GetValueIn(gate, 1);
GateRef a2 = acc_.GetValueIn(gate, 2);
if (IsLoadVtable(func)) {
builder_.JSCallThisTargetTypeCheck(funcType, func);
std::vector<GateRef> args { glue_, actualArgc, func, newTarget, thisObj, a0, a1, a2 };
GateRef result = builder_.TypedAotCall(gate, args);
acc_.ReplaceGate(gate, builder_.GetState(), builder_.GetDepend(), result);
} else {
int methodIndex = tsManager_->GetMethodIndex(funcGt);
if (methodIndex == -1) {
return;
}
builder_.JSCallTargetTypeCheck(funcType, func, builder_.IntPtr(methodIndex));
std::vector<GateRef> args { glue_, actualArgc, func, newTarget, thisObj, a0, a1, a2 };
GateRef result = builder_.TypedAotCall(gate, args);
acc_.ReplaceGate(gate, builder_.GetState(), builder_.GetDepend(), result);
}
std::vector<GateRef> argsFastCall { glue_, func, thisObj, a0, a1, a2 };
std::vector<GateRef> args { glue_, actualArgc, func, newTarget, thisObj, a0, a1, a2 };
CheckCallTargetAndLowerCall(gate, func, funcGt, funcType, args, argsFastCall);
}
void TSTypeLowering::LowerTypedCallrange(GateRef gate)
{
std::vector<GateRef> vec;
std::vector<GateRef> vec1;
size_t numArgs = acc_.GetNumValueIn(gate);
GateRef actualArgc = builder_.Int32(BytecodeCallArgc::ComputeCallArgc(acc_.GetNumValueIn(gate),
EcmaOpcode::CALLRANGE_IMM8_IMM8_V8));
@ -1019,19 +1001,13 @@ void TSTypeLowering::LowerTypedCallrange(GateRef gate)
for (size_t i = 0; i < argc; i++) {
vec.emplace_back(acc_.GetValueIn(gate, i));
}
if (IsLoadVtable(func)) {
builder_.JSCallThisTargetTypeCheck(funcType, func);
GateRef result = builder_.TypedAotCall(gate, vec);
acc_.ReplaceGate(gate, builder_.GetState(), builder_.GetDepend(), result);
} else {
int methodIndex = tsManager_->GetMethodIndex(funcGt);
if (methodIndex == -1) {
return;
}
builder_.JSCallTargetTypeCheck(funcType, func, builder_.IntPtr(methodIndex));
GateRef result = builder_.TypedAotCall(gate, vec);
acc_.ReplaceGate(gate, builder_.GetState(), builder_.GetDepend(), result);
vec1.emplace_back(glue_);
vec1.emplace_back(func);
vec1.emplace_back(thisObj);
for (size_t i = 0; i < argc; i++) {
vec1.emplace_back(acc_.GetValueIn(gate, i));
}
CheckCallTargetAndLowerCall(gate, func, funcGt, funcType, vec, vec1);
}
bool TSTypeLowering::IsLoadVtable(GateRef func)
@ -1061,6 +1037,23 @@ bool TSTypeLowering::CanOptimizeAsFastCall(GateRef func, uint32_t len)
return true;
}
// Variant of CheckCallTargetAndLowerCall for explicit 'this' calls (callthisN bytecodes):
// only a this-target type check is emitted, never a constpool method comparison.
// - gate:         the call gate being lowered (replaced in place on success).
// - func:         the callee value gate.
// - funcGt:       the callee's global TS type ref, queried for fast-call eligibility.
// - funcType:     the callee's gate type, embedded into the emitted target check.
// - args:         argument vector for the standard convention.
// - argsFastCall: argument vector for the fast-call convention.
// Returns without lowering when the fast-call flag is not valid for this type.
void TSTypeLowering::CheckThisCallTargetAndLowerCall(GateRef gate, GateRef func, GlobalTSTypeRef funcGt,
    GateType funcType, const std::vector<GateRef> &args, const std::vector<GateRef> &argsFastCall)
{
    if (!tsManager_->FastCallFlagIsVaild(funcGt)) {
        return;
    }
    if (tsManager_->CanFastCall(funcGt)) {
        // Fast-call path: guard then lower with the shorter fast-call argument list.
        builder_.JSFastCallThisTargetTypeCheck(funcType, func);
        GateRef result = builder_.TypedFastCall(gate, argsFastCall);
        acc_.ReplaceGate(gate, builder_.GetState(), builder_.GetDepend(), result);
    } else {
        // Standard path: guard then lower with the full argument list.
        builder_.JSCallThisTargetTypeCheck(funcType, func);
        GateRef result = builder_.TypedCall(gate, args);
        acc_.ReplaceGate(gate, builder_.GetState(), builder_.GetDepend(), result);
    }
}
void TSTypeLowering::LowerTypedCallthis0(GateRef gate)
{
// 2: number of value inputs
@ -1069,16 +1062,16 @@ void TSTypeLowering::LowerTypedCallthis0(GateRef gate)
if (!CanOptimizeAsFastCall(func, 0)) {
return;
}
GateType funcType = acc_.GetGateType(func);
builder_.JSCallThisTargetTypeCheck(funcType, func);
GateRef actualArgc = builder_.Int32(BytecodeCallArgc::ComputeCallArgc(acc_.GetNumValueIn(gate),
EcmaOpcode::CALLTHIS0_IMM8_V8));
GateRef newTarget = builder_.Undefined();
GateRef thisObj = acc_.GetValueIn(gate, 0);
std::vector<GateRef> args { glue_, actualArgc, func, newTarget, thisObj };
GateRef result = builder_.TypedAotCall(gate, args);
acc_.ReplaceGate(gate, builder_.GetState(), builder_.GetDepend(), result);
GateType funcType = acc_.GetGateType(func);
GlobalTSTypeRef funcGt = funcType.GetGTRef();
std::vector<GateRef> argsFastCall { glue_, func, thisObj };
std::vector<GateRef> args { glue_, actualArgc, func, newTarget, thisObj };
CheckThisCallTargetAndLowerCall(gate, func, funcGt, funcType, args, argsFastCall);
}
void TSTypeLowering::LowerTypedCallthis1(GateRef gate)
@ -1099,14 +1092,13 @@ void TSTypeLowering::LowerTypedCallthis1(GateRef gate)
return;
}
GateType funcType = acc_.GetGateType(func);
builder_.JSCallThisTargetTypeCheck(funcType, func);
GateRef actualArgc = builder_.Int32(BytecodeCallArgc::ComputeCallArgc(acc_.GetNumValueIn(gate),
EcmaOpcode::CALLTHIS1_IMM8_V8_V8));
GateRef newTarget = builder_.Undefined();
GlobalTSTypeRef funcGt = funcType.GetGTRef();
std::vector<GateRef> argsFastCall { glue_, func, thisObj, a0 };
std::vector<GateRef> args { glue_, actualArgc, func, newTarget, thisObj, a0 };
GateRef result = builder_.TypedAotCall(gate, args);
acc_.ReplaceGate(gate, builder_.GetState(), builder_.GetDepend(), result);
CheckThisCallTargetAndLowerCall(gate, func, funcGt, funcType, args, argsFastCall);
}
}
@ -1119,17 +1111,18 @@ void TSTypeLowering::LowerTypedCallthis2(GateRef gate)
return;
}
GateType funcType = acc_.GetGateType(func);
builder_.JSCallThisTargetTypeCheck(funcType, func);
GateRef actualArgc = builder_.Int32(BytecodeCallArgc::ComputeCallArgc(acc_.GetNumValueIn(gate),
EcmaOpcode::CALLTHIS2_IMM8_V8_V8_V8));
GateRef newTarget = builder_.Undefined();
GateRef thisObj = acc_.GetValueIn(gate, 0);
GateRef a0Value = acc_.GetValueIn(gate, 1);
GateRef a1Value = acc_.GetValueIn(gate, 2);
std::vector<GateRef> args { glue_, actualArgc, func, newTarget, thisObj, a0Value, a1Value };
GateRef result = builder_.TypedAotCall(gate, args);
acc_.ReplaceGate(gate, builder_.GetState(), builder_.GetDepend(), result);
GlobalTSTypeRef funcGt = funcType.GetGTRef();
std::vector<GateRef> argsFastCall { glue_, func, thisObj, a0Value, a1Value };
std::vector<GateRef> args { glue_, actualArgc, func, newTarget, thisObj, a0Value, a1Value };
CheckThisCallTargetAndLowerCall(gate, func, funcGt, funcType, args, argsFastCall);
}
void TSTypeLowering::LowerTypedCallthis3(GateRef gate)
@ -1141,7 +1134,7 @@ void TSTypeLowering::LowerTypedCallthis3(GateRef gate)
return;
}
GateType funcType = acc_.GetGateType(func);
builder_.JSCallThisTargetTypeCheck(funcType, func);
GateRef actualArgc = builder_.Int32(BytecodeCallArgc::ComputeCallArgc(acc_.GetNumValueIn(gate),
EcmaOpcode::CALLTHIS3_IMM8_V8_V8_V8_V8));
GateRef newTarget = builder_.Undefined();
@ -1149,10 +1142,11 @@ void TSTypeLowering::LowerTypedCallthis3(GateRef gate)
GateRef a0Value = acc_.GetValueIn(gate, 1);
GateRef a1Value = acc_.GetValueIn(gate, 2);
GateRef a2Value = acc_.GetValueIn(gate, 3);
std::vector<GateRef> args { glue_, actualArgc, func, newTarget, thisObj, a0Value, a1Value, a2Value };
GateRef result = builder_.TypedAotCall(gate, args);
acc_.ReplaceGate(gate, builder_.GetState(), builder_.GetDepend(), result);
GlobalTSTypeRef funcGt = funcType.GetGTRef();
std::vector<GateRef> argsFastCall { glue_, func, thisObj, a0Value, a1Value, a2Value };
std::vector<GateRef> args { glue_, actualArgc, func, newTarget, thisObj, a0Value, a1Value, a2Value };
CheckThisCallTargetAndLowerCall(gate, func, funcGt, funcType, args, argsFastCall);
}
void TSTypeLowering::LowerTypedCallthisrange(GateRef gate)
@ -1170,20 +1164,38 @@ void TSTypeLowering::LowerTypedCallthisrange(GateRef gate)
return;
}
GateType funcType = acc_.GetGateType(func);
builder_.JSCallThisTargetTypeCheck(funcType, func);
GateRef thisObj = acc_.GetValueIn(gate, 0);
GateRef newTarget = builder_.Undefined();
vec.emplace_back(glue_);
vec.emplace_back(actualArgc);
vec.emplace_back(func);
vec.emplace_back(newTarget);
vec.emplace_back(thisObj);
// add common args
for (size_t i = fixedInputsNum; i < numIns - callTargetIndex; i++) {
vec.emplace_back(acc_.GetValueIn(gate, i));
GlobalTSTypeRef funcGt = funcType.GetGTRef();
if (!tsManager_->FastCallFlagIsVaild(funcGt)) {
return;
}
if (tsManager_->CanFastCall(funcGt)) {
vec.emplace_back(glue_);
vec.emplace_back(func);
vec.emplace_back(thisObj);
// add common args
for (size_t i = fixedInputsNum; i < numIns - callTargetIndex; i++) {
vec.emplace_back(acc_.GetValueIn(gate, i));
}
builder_.JSFastCallThisTargetTypeCheck(funcType, func);
GateRef result = builder_.TypedFastCall(gate, vec);
acc_.ReplaceGate(gate, builder_.GetState(), builder_.GetDepend(), result);
} else {
vec.emplace_back(glue_);
vec.emplace_back(actualArgc);
vec.emplace_back(func);
vec.emplace_back(newTarget);
vec.emplace_back(thisObj);
// add common args
for (size_t i = fixedInputsNum; i < numIns - callTargetIndex; i++) {
vec.emplace_back(acc_.GetValueIn(gate, i));
}
builder_.JSCallThisTargetTypeCheck(funcType, func);
GateRef result = builder_.TypedCall(gate, vec);
acc_.ReplaceGate(gate, builder_.GetState(), builder_.GetDepend(), result);
}
GateRef result = builder_.TypedAotCall(gate, vec);
acc_.ReplaceGate(gate, builder_.GetState(), builder_.GetDepend(), result);
}
void TSTypeLowering::AddProfiling(GateRef gate)

View File

@ -112,6 +112,10 @@ private:
void LowerTypedCallthisrange(GateRef gate);
bool IsLoadVtable(GateRef func);
bool CanOptimizeAsFastCall(GateRef func, uint32_t len);
void CheckCallTargetAndLowerCall(GateRef gate, GateRef func, GlobalTSTypeRef funcGt,
GateType funcType, const std::vector<GateRef> &args, const std::vector<GateRef> &argsFastCall);
void CheckThisCallTargetAndLowerCall(GateRef gate, GateRef func, GlobalTSTypeRef funcGt,
GateType funcType, const std::vector<GateRef> &args, const std::vector<GateRef> &argsFastCall);
bool CheckParam(GateRef gate, bool isCallThis, MethodLiteral* method);

View File

@ -65,9 +65,15 @@ void TypeLowering::LowerType(GateRef gate)
case OpCode::JSCALLTARGET_TYPE_CHECK:
LowerJSCallTargetTypeCheck(gate);
break;
case OpCode::JSFASTCALLTARGET_TYPE_CHECK:
LowerJSFastCallTargetTypeCheck(gate);
break;
case OpCode::JSCALLTHISTARGET_TYPE_CHECK:
LowerJSCallThisTargetTypeCheck(gate);
break;
case OpCode::JSFASTCALLTHISTARGET_TYPE_CHECK:
LowerJSFastCallThisTargetTypeCheck(gate);
break;
case OpCode::TYPED_CALL_CHECK:
LowerCallTargetCheck(gate);
break;
@ -1012,6 +1018,32 @@ void TypeLowering::LowerJSCallTargetTypeCheck(GateRef gate)
}
}
// Lowers a JSFASTCALLTARGET_TYPE_CHECK gate into a concrete deopt guard.
// The check passes only when the callee (value-in 0) is a heap object, is a JSFunction,
// its Method has AOT code compiled with the fast-call convention, and that Method is
// identical to the one found in the caller's constant pool at methodIndex (value-in 1).
// On failure the frame deoptimizes with DeoptType::NOTJSFASTCALLTGT.
// The gate's param type must be a TS function type; anything else is a compiler bug.
void TypeLowering::LowerJSFastCallTargetTypeCheck(GateRef gate)
{
    Environment env(gate, circuit_, &builder_);
    auto type = acc_.GetParamGateType(gate);
    if (tsManager_->IsFunctionTypeKind(type)) {
        ArgumentAccessor argAcc(circuit_);
        GateRef frameState = GetFrameState(gate);
        // Caller function, needed to resolve the constant-pool entry for methodIndex.
        GateRef jsFunc = argAcc.GetFrameArgsIn(frameState, FrameArgIdx::FUNC);
        auto func = acc_.GetValueIn(gate, 0);
        auto methodIndex = acc_.GetValueIn(gate, 1);
        GateRef isObj = builder_.TaggedIsHeapObject(func);
        GateRef isJsFunc = builder_.IsJSFunction(func);
        GateRef funcMethodTarget = builder_.GetMethodFromFunction(func);
        // Fast-call targets must both be AOT-compiled and use the fast calling convention.
        GateRef canFastCall = builder_.HasAotCodeAndFastCall(funcMethodTarget);
        GateRef checkFunc = builder_.BoolAnd(isObj, isJsFunc);
        GateRef checkAot = builder_.BoolAnd(checkFunc, canFastCall);
        GateRef methodTarget = GetObjectFromConstPool(jsFunc, methodIndex);
        // Also require the runtime method to match the statically expected one.
        GateRef check = builder_.BoolAnd(checkAot, builder_.Equal(funcMethodTarget, methodTarget));
        builder_.DeoptCheck(check, frameState, DeoptType::NOTJSFASTCALLTGT);
        acc_.ReplaceGate(gate, builder_.GetState(), builder_.GetDepend(), Circuit::NullGate());
    } else {
        LOG_ECMA(FATAL) << "this branch is unreachable";
        UNREACHABLE();
    }
}
void TypeLowering::LowerJSCallThisTargetTypeCheck(GateRef gate)
{
Environment env(gate, circuit_, &builder_);
@ -1033,6 +1065,27 @@ void TypeLowering::LowerJSCallThisTargetTypeCheck(GateRef gate)
}
}
// Lowers a JSFASTCALLTHISTARGET_TYPE_CHECK gate into a concrete deopt guard.
// Like LowerJSFastCallTargetTypeCheck but for 'this' calls: the callee (value-in 0)
// must be a heap object, a JSFunction, and its Method must have AOT code with the
// fast-call convention. No constant-pool method comparison is performed here.
// On failure the frame deoptimizes with DeoptType::NOTJSFASTCALLTGT.
// The gate's param type must be a TS function type; anything else is a compiler bug.
void TypeLowering::LowerJSFastCallThisTargetTypeCheck(GateRef gate)
{
    Environment env(gate, circuit_, &builder_);
    auto type = acc_.GetParamGateType(gate);
    if (tsManager_->IsFunctionTypeKind(type)) {
        GateRef frameState = GetFrameState(gate);
        auto func = acc_.GetValueIn(gate, 0);
        GateRef isObj = builder_.TaggedIsHeapObject(func);
        GateRef isJsFunc = builder_.IsJSFunction(func);
        GateRef checkFunc = builder_.BoolAnd(isObj, isJsFunc);
        GateRef funcMethodTarget = builder_.GetMethodFromFunction(func);
        // Fast-call targets must both be AOT-compiled and use the fast calling convention.
        GateRef canFastCall = builder_.HasAotCodeAndFastCall(funcMethodTarget);
        GateRef check = builder_.BoolAnd(checkFunc, canFastCall);
        builder_.DeoptCheck(check, frameState, DeoptType::NOTJSFASTCALLTGT);
        acc_.ReplaceGate(gate, builder_.GetState(), builder_.GetDepend(), Circuit::NullGate());
    } else {
        LOG_ECMA(FATAL) << "this branch is unreachable";
        UNREACHABLE();
    }
}
void TypeLowering::LowerCallTargetCheck(GateRef gate)
{
Environment env(gate, circuit_, &builder_);
@ -1090,7 +1143,6 @@ void TypeLowering::LowerTypedNewAllocateThis(GateRef gate, GateRef glue)
builder_.Jump(&exit);
}
builder_.Bind(&exit);
builder_.SetDepend(*thisObj);
acc_.ReplaceGate(gate, builder_.GetState(), builder_.GetDepend(), *thisObj);
}
@ -1118,7 +1170,6 @@ void TypeLowering::LowerTypedSuperAllocateThis(GateRef gate, GateRef glue)
builder_.Jump(&exit);
}
builder_.Bind(&exit);
builder_.SetDepend(*thisObj);
acc_.ReplaceGate(gate, builder_.GetState(), builder_.GetDepend(), *thisObj);
}

View File

@ -157,7 +157,9 @@ private:
void LowerTypedCallBuitin(GateRef gate);
void LowerCallTargetCheck(GateRef gate);
void LowerJSCallTargetTypeCheck(GateRef gate);
void LowerJSFastCallTargetTypeCheck(GateRef gate);
void LowerJSCallThisTargetTypeCheck(GateRef gate);
void LowerJSFastCallThisTargetTypeCheck(GateRef gate);
void LowerTypedNewAllocateThis(GateRef gate, GateRef glue);
void LowerTypedSuperAllocateThis(GateRef gate, GateRef glue);
void LowerGetSuperConstructor(GateRef gate);

View File

@ -258,6 +258,7 @@ void Deoptimizier::CollectDeoptBundleVec(std::vector<ARKDeopt>& deoptBundle)
for (; !it.Done() && deoptBundle.empty(); it.Advance<GCVisitedFlag::VISITED>()) {
FrameType type = it.GetFrameType();
switch (type) {
case FrameType::OPTIMIZED_JS_FAST_CALL_FUNCTION_FRAME:
case FrameType::OPTIMIZED_JS_FUNCTION_FRAME: {
auto frame = it.GetFrame<OptimizedJSFunctionFrame>();
frame->GetDeoptBundleInfo(it, deoptBundle);
@ -318,7 +319,8 @@ bool Deoptimizier::CollectVirtualRegisters(Method* method, FrameWriter *frameWri
int32_t actualNumArgs = 0;
int32_t declaredNumArgs = 0;
if (curDepth == 0) {
actualNumArgs = static_cast<int32_t>(frameArgc_) - NUM_MANDATORY_JSFUNC_ARGS;
actualNumArgs = static_cast<int32_t>(GetDeoptValue(curDepth,
static_cast<int32_t>(SpecVregIndex::ACTUAL_ARGC_INDEX)).GetRawData());
declaredNumArgs = static_cast<int32_t>(method->GetNumArgsWithCallField());
} else {
// inline method actualNumArgs equal to declaredNumArgs
@ -326,39 +328,26 @@ bool Deoptimizier::CollectVirtualRegisters(Method* method, FrameWriter *frameWri
declaredNumArgs = method->GetNumArgsWithCallField();
}
bool haveExtra = method->HaveExtraWithCallField();
int32_t callFieldNumVregs = static_cast<int32_t>(method->GetNumVregsWithCallField());
// layout of frame:
// [maybe argc] [actual args] [reserved args] [call field virtual regs]
// [maybe argc]
if (declaredNumArgs != actualNumArgs && haveExtra) {
if (!method->IsFastCall() && declaredNumArgs != actualNumArgs) {
auto value = JSTaggedValue(actualNumArgs);
frameWriter->PushValue(value.GetRawData());
}
int32_t reservedCount = std::max(actualNumArgs, declaredNumArgs);
int32_t virtualIndex = reservedCount + callFieldNumVregs +
int32_t virtualIndex = declaredNumArgs + callFieldNumVregs +
static_cast<int32_t>(method->GetNumRevervedArgs()) - 1;
if (!frameWriter->Reserve(static_cast<size_t>(virtualIndex))) {
return false;
}
// [actual args]
if (declaredNumArgs > actualNumArgs) {
for (int32_t i = 0; i < declaredNumArgs - actualNumArgs; i++) {
frameWriter->PushValue(JSTaggedValue::Undefined().GetRawData());
virtualIndex--;
}
}
for (int32_t i = actualNumArgs - 1; i >= 0; i--) {
for (int32_t i = declaredNumArgs - 1; i >= 0; i--) {
JSTaggedValue value = JSTaggedValue::Undefined();
// deopt value
if (HasDeoptValue(curDepth, virtualIndex)) {
value = GetDeoptValue(curDepth, virtualIndex);
} else {
if (curDepth == 0) {
value = GetActualFrameArgs(i);
}
}
frameWriter->PushValue(value.GetRawData());
virtualIndex--;

View File

@ -32,6 +32,7 @@ enum class SpecVregIndex: int {
FUNC_INDEX = -4,
NEWTARGET_INDEX = -5,
THIS_OBJECT_INDEX = -6,
ACTUAL_ARGC_INDEX = -7,
};
struct Context {

View File

@ -260,6 +260,7 @@ bool GetTypeOffsetAndPrevOffsetFromFrameType(uintptr_t frameType, uintptr_t &typ
prevOffset = OptimizedJSFunctionUnfoldArgVFrame::GetPrevOffset();
break;
case FrameType::OPTIMIZED_JS_FUNCTION_ARGS_CONFIG_FRAME:
case FrameType::OPTIMIZED_JS_FAST_CALL_FUNCTION_FRAME:
case FrameType::OPTIMIZED_JS_FUNCTION_FRAME:
typeOffset = OptimizedJSFunctionFrame::GetTypeOffset();
prevOffset = OptimizedJSFunctionFrame::GetPrevOffset();

View File

@ -428,24 +428,22 @@ JSTaggedValue EcmaVM::InvokeEcmaAotEntrypoint(JSHandle<JSFunction> mainFunc, JSH
const JSPandaFile *jsPandaFile, std::string_view entryPoint)
{
aotFileManager_->SetAOTMainFuncEntry(mainFunc, jsPandaFile, entryPoint);
Method *method = mainFunc->GetCallTarget();
size_t actualNumArgs = method->GetNumArgs();
size_t argsNum = actualNumArgs + NUM_MANDATORY_JSFUNC_ARGS;
std::vector<JSTaggedType> args(argsNum, JSTaggedValue::Undefined().GetRawData());
args[0] = mainFunc.GetTaggedValue().GetRawData();
args[2] = thisArg.GetTaggedValue().GetRawData(); // 2: this
const JSTaggedType *prevFp = thread_->GetLastLeaveFrame();
return JSFunction::InvokeOptimizedEntrypoint(thread_, mainFunc, thisArg, entryPoint);
}
JSTaggedValue EcmaVM::FastCallAot(size_t actualNumArgs, JSTaggedType *args, const JSTaggedType *prevFp)
{
auto entry = thread_->GetRTInterface(kungfu::RuntimeStubCSigns::ID_OptimizedFastCallEntry);
// do not modify this log to INFO, this will call many times
LOG_ECMA(DEBUG) << "start to execute aot entry: " << entryPoint;
JSTaggedValue res = ExecuteAot(actualNumArgs, args.data(), prevFp, OptimizedEntryFrame::CallType::CALL_FUNC);
if (thread_->HasPendingException()) {
return thread_->GetException();
}
LOG_ECMA(DEBUG) << "start to execute aot entry: " << (void*)entry;
auto res = reinterpret_cast<FastCallAotEntryType>(entry)(thread_->GetGlueAddr(),
actualNumArgs,
args,
reinterpret_cast<uintptr_t>(prevFp));
return res;
}
JSTaggedValue EcmaVM::ExecuteAot(size_t actualNumArgs, JSTaggedType *args, const JSTaggedType *prevFp,
OptimizedEntryFrame::CallType callType)
JSTaggedValue EcmaVM::ExecuteAot(size_t actualNumArgs, JSTaggedType *args, const JSTaggedType *prevFp)
{
INTERPRETER_TRACE(thread_, ExecuteAot);
auto entry = thread_->GetRTInterface(kungfu::RuntimeStubCSigns::ID_JSFunctionEntry);
@ -454,8 +452,7 @@ JSTaggedValue EcmaVM::ExecuteAot(size_t actualNumArgs, JSTaggedType *args, const
auto res = reinterpret_cast<JSFunctionEntryType>(entry)(thread_->GetGlueAddr(),
actualNumArgs,
args,
reinterpret_cast<uintptr_t>(prevFp),
static_cast<size_t>(callType));
reinterpret_cast<uintptr_t>(prevFp));
return res;
}

View File

@ -530,8 +530,9 @@ public:
return quickFixManager_;
}
JSTaggedValue ExecuteAot(size_t actualNumArgs, JSTaggedType *args, const JSTaggedType *prevFp,
OptimizedEntryFrame::CallType callType);
JSTaggedValue ExecuteAot(size_t actualNumArgs, JSTaggedType *args, const JSTaggedType *prevFp);
JSTaggedValue FastCallAot(size_t actualNumArgs, JSTaggedType *args, const JSTaggedType *prevFp);
// For icu objects cache
void SetIcuFormatterToCache(IcuFormatterType type, const std::string &locale, void *icuObj,

View File

@ -56,6 +56,7 @@ JSTaggedValue FrameIterator::GetFunction() const
{
FrameType type = GetFrameType();
switch (type) {
case FrameType::OPTIMIZED_JS_FAST_CALL_FUNCTION_FRAME:
case FrameType::OPTIMIZED_JS_FUNCTION_FRAME: {
auto frame = GetFrame<OptimizedJSFunctionFrame>();
return frame->GetFunction();
@ -169,6 +170,7 @@ void FrameIterator::Advance()
current_ = frame->GetPrevFrameFp();
break;
}
case FrameType::OPTIMIZED_JS_FAST_CALL_FUNCTION_FRAME:
case FrameType::OPTIMIZED_JS_FUNCTION_FRAME: {
auto frame = GetFrame<OptimizedJSFunctionFrame>();
if constexpr (GCVisit == GCVisitedFlag::VISITED) {
@ -351,6 +353,7 @@ uintptr_t FrameIterator::GetPrevFrameCallSiteSp() const
return frame->GetCallSiteSp();
}
case FrameType::OPTIMIZED_FRAME:
case FrameType::OPTIMIZED_JS_FAST_CALL_FUNCTION_FRAME:
case FrameType::OPTIMIZED_JS_FUNCTION_FRAME: {
ASSERT(thread_ != nullptr);
auto callSiteSp = reinterpret_cast<uintptr_t>(current_) + fpDeltaPrevFrameSp_;
@ -404,6 +407,7 @@ uint32_t FrameIterator::GetBytecodeOffset() const
auto offset = frame->GetPc() - method->GetBytecodeArray();
return static_cast<uint32_t>(offset);
}
case FrameType::OPTIMIZED_JS_FAST_CALL_FUNCTION_FRAME:
case FrameType::OPTIMIZED_JS_FUNCTION_FRAME: {
auto frame = this->GetFrame<OptimizedJSFunctionFrame>();
ConstInfo constInfo;
@ -505,22 +509,22 @@ void OptimizedJSFunctionFrame::CollectPcOffsetInfo(const FrameIterator &it, Cons
ARK_INLINE void OptimizedJSFunctionFrame::GCIterate(const FrameIterator &it,
const RootVisitor &visitor,
const RootRangeVisitor &rangeVisitor,
const RootBaseAndDerivedVisitor &derivedVisitor) const
[[maybe_unused]] const RootRangeVisitor &rangeVisitor,
const RootBaseAndDerivedVisitor &derivedVisitor, FrameType frameType) const
{
OptimizedJSFunctionFrame *frame = OptimizedJSFunctionFrame::GetFrameFromSp(it.GetSp());
uintptr_t *jsFuncPtr = reinterpret_cast<uintptr_t *>(frame);
uintptr_t jsFuncSlot = ToUintPtr(jsFuncPtr);
visitor(Root::ROOT_FRAME, ObjectSlot(jsFuncSlot));
uintptr_t *preFrameSp = frame->ComputePrevFrameSp(it);
auto argc = frame->GetArgc(preFrameSp);
JSTaggedType *argv = frame->GetArgv(reinterpret_cast<uintptr_t *>(preFrameSp));
if (argc > 0) {
uintptr_t start = ToUintPtr(argv); // argv
uintptr_t end = ToUintPtr(argv + argc);
rangeVisitor(Root::ROOT_FRAME, ObjectSlot(start), ObjectSlot(end));
if (frameType == FrameType::OPTIMIZED_JS_FUNCTION_FRAME) {
uintptr_t *preFrameSp = frame->ComputePrevFrameSp(it);
auto argc = frame->GetArgc(preFrameSp);
JSTaggedType *argv = frame->GetArgv(reinterpret_cast<uintptr_t *>(preFrameSp));
if (argc > 0) {
uintptr_t start = ToUintPtr(argv); // argv
uintptr_t end = ToUintPtr(argv + argc);
rangeVisitor(Root::ROOT_FRAME, ObjectSlot(start), ObjectSlot(end));
}
}
bool ret = it.IteratorStackMap(visitor, derivedVisitor);

View File

@ -114,6 +114,7 @@ enum class FrameType: uintptr_t {
OPTIMIZED_FRAME = 0,
OPTIMIZED_ENTRY_FRAME,
OPTIMIZED_JS_FUNCTION_FRAME,
OPTIMIZED_JS_FAST_CALL_FUNCTION_FRAME,
ASM_BRIDGE_FRAME,
LEAVE_FRAME,
LEAVE_FRAME_WITH_ARGV,
@ -479,7 +480,7 @@ public:
}
void GCIterate(const FrameIterator &it, const RootVisitor &visitor, const RootRangeVisitor &rangeVisitor,
const RootBaseAndDerivedVisitor &derivedVisitor) const;
const RootBaseAndDerivedVisitor &derivedVisitor, FrameType frameType) const;
void CollectPcOffsetInfo(const FrameIterator &it, ConstInfo &info) const;
inline JSTaggedValue GetFunction() const
@ -1617,7 +1618,8 @@ public:
bool IsOptimizedJSFunctionFrame(FrameType type) const
{
return type == FrameType::OPTIMIZED_JS_FUNCTION_FRAME;
return type == FrameType::OPTIMIZED_JS_FUNCTION_FRAME ||
type == FrameType::OPTIMIZED_JS_FAST_CALL_FUNCTION_FRAME;
}
bool IsOptimizedJSFunctionFrame() const

View File

@ -183,6 +183,7 @@ JSTaggedValue FrameHandler::GetFunction() const
auto *frame = BuiltinFrame::GetFrameFromSp(sp_);
return frame->GetFunction();
}
case FrameType::OPTIMIZED_JS_FAST_CALL_FUNCTION_FRAME:
case FrameType::OPTIMIZED_JS_FUNCTION_FRAME: {
auto *frame = OptimizedJSFunctionFrame::GetFrameFromSp(sp_);
return frame->GetFunction();
@ -375,9 +376,10 @@ void FrameHandler::IterateFrameChain(JSTaggedType *start, const RootVisitor &vis
frame->GCIterate(it, visitor, rangeVisitor, derivedVisitor);
break;
}
case FrameType::OPTIMIZED_JS_FAST_CALL_FUNCTION_FRAME:
case FrameType::OPTIMIZED_JS_FUNCTION_FRAME: {
auto frame = it.GetFrame<OptimizedJSFunctionFrame>();
frame->GCIterate(it, visitor, rangeVisitor, derivedVisitor);
frame->GCIterate(it, visitor, rangeVisitor, derivedVisitor, type);
break;
}
case FrameType::ASM_INTERPRETER_FRAME:

View File

@ -87,7 +87,8 @@ public:
bool IsOptimizedJSFunctionFrame() const
{
FrameType type = GetFrameType();
return type == FrameType::OPTIMIZED_JS_FUNCTION_FRAME;
return type == FrameType::OPTIMIZED_JS_FUNCTION_FRAME ||
type == FrameType::OPTIMIZED_JS_FAST_CALL_FUNCTION_FRAME;
}
bool IsJSFrame(FrameType type) const
@ -97,7 +98,8 @@ public:
bool IsOptimizedJSFunctionFrame(FrameType type) const
{
return type == FrameType::OPTIMIZED_JS_FUNCTION_FRAME;
return type == FrameType::OPTIMIZED_JS_FUNCTION_FRAME ||
type == FrameType::OPTIMIZED_JS_FAST_CALL_FUNCTION_FRAME;
}
bool IsAsmInterpretedFrame() const

View File

@ -799,6 +799,16 @@ JSTaggedValue EcmaInterpreter::GeneratorReEnterInterpreter(JSThread *thread, JSH
JSTaggedValue EcmaInterpreter::GeneratorReEnterAot(JSThread *thread, JSHandle<GeneratorContext> context)
{
JSHandle<JSFunction> func = JSHandle<JSFunction>::Cast(JSHandle<JSTaggedValue>(thread, context->GetMethod()));
if (func->IsClassConstructor()) {
{
EcmaVM *ecmaVm = thread->GetEcmaVM();
ObjectFactory *factory = ecmaVm->GetFactory();
JSHandle<JSObject> error =
factory->GetJSError(ErrorType::TYPE_ERROR, "class constructor cannot called without 'new'");
thread->SetException(error.GetTaggedValue());
}
return thread->GetException();
}
Method *method = func->GetCallTarget();
JSTaggedValue genObject = context->GetGeneratorObject();
std::vector<JSTaggedType> args(method->GetNumArgs() + NUM_MANDATORY_JSFUNC_ARGS,
@ -807,8 +817,7 @@ JSTaggedValue EcmaInterpreter::GeneratorReEnterAot(JSThread *thread, JSHandle<Ge
args[1] = genObject.GetRawData();
args[2] = context->GetThis().GetRawData(); // 2: this
const JSTaggedType *prevFp = thread->GetLastLeaveFrame();
auto res = thread->GetEcmaVM()->ExecuteAot(method->GetNumArgs(), args.data(), prevFp,
OptimizedEntryFrame::CallType::CALL_FUNC);
auto res = thread->GetEcmaVM()->ExecuteAot(method->GetNumArgs(), args.data(), prevFp);
return res;
}

View File

@ -222,9 +222,27 @@ JSTaggedValue InterpreterAssembly::Execute(EcmaRuntimeCallInfo *info)
ECMAObject *callTarget = reinterpret_cast<ECMAObject*>(info->GetFunctionValue().GetTaggedObject());
Method *method = callTarget->GetCallTarget();
if (method->IsAotWithCallField()) {
JSHandle<JSFunction> func(thread, info->GetFunctionValue());
if (func->IsClassConstructor()) {
{
EcmaVM *ecmaVm = thread->GetEcmaVM();
ObjectFactory *factory = ecmaVm->GetFactory();
JSHandle<JSObject> error =
factory->GetJSError(ErrorType::TYPE_ERROR, "class constructor cannot called without 'new'");
thread->SetException(error.GetTaggedValue());
}
return thread->GetException();
}
const JSTaggedType *prevFp = thread->GetLastLeaveFrame();
auto res =
thread->GetEcmaVM()->ExecuteAot(argc, info->GetArgs(), prevFp, OptimizedEntryFrame::CallType::CALL_FUNC);
JSTaggedValue res;
if (method->IsFastCall()) {
JSTaggedType *stackArgs = info->GetArgs();
stackArgs[1] = stackArgs[0];
res = thread->GetEcmaVM()->FastCallAot(argc, stackArgs + 1, prevFp);
} else {
res = thread->GetEcmaVM()->ExecuteAot(argc, info->GetArgs(), prevFp);
}
const JSTaggedType *curSp = thread->GetCurrentSPFrame();
InterpretedEntryFrame *entryState = InterpretedEntryFrame::GetFrameFromSp(curSp);
JSTaggedType *prevSp = entryState->base.prev;

View File

@ -313,6 +313,45 @@ JSTaggedValue JSFunction::Invoke(EcmaRuntimeCallInfo *info, const JSHandle<JSTag
return JSFunction::Call(info);
}
// Invokes an AOT-compiled entry function (e.g. a module's main) from native code.
// Class constructors must not be called this way: a TypeError is set on the
// thread and the pending exception is returned. Otherwise the call is
// dispatched on Method::IsFastCall():
//   - fast call: argument layout [func, this, args...] (no new-target slot,
//     hence one fewer mandatory argument), routed through FastCallAot;
//   - normal call: layout [func, newTarget(undefined), this, args...],
//     routed through ExecuteAot.
// Returns the call result, or the pending exception if one was raised.
JSTaggedValue JSFunction::InvokeOptimizedEntrypoint(JSThread *thread, JSHandle<JSFunction> mainFunc,
                                                    JSHandle<JSTaggedValue> &thisArg, std::string_view entryPoint)
{
    if (mainFunc->IsClassConstructor()) {
        {
            // Inner scope keeps the error-construction handles local before
            // re-reading the exception off the thread.
            ObjectFactory *factory = thread->GetEcmaVM()->GetFactory();
            JSHandle<JSObject> error =
                factory->GetJSError(ErrorType::TYPE_ERROR, "class constructor cannot called without 'new'");
            thread->SetException(error.GetTaggedValue());
        }
        return thread->GetException();
    }
    Method *method = mainFunc->GetCallTarget();
    size_t actualNumArgs = method->GetNumArgs();
    const JSTaggedType *prevFp = thread->GetLastLeaveFrame();
    JSTaggedValue res;
    if (method->IsFastCall()) {
        // Fast-call convention drops the new-target slot: one fewer mandatory arg.
        size_t argsNum = actualNumArgs + NUM_MANDATORY_JSFUNC_ARGS - 1;
        std::vector<JSTaggedType> args(argsNum, JSTaggedValue::Undefined().GetRawData());
        args[0] = mainFunc.GetTaggedValue().GetRawData();
        args[1] = thisArg.GetTaggedValue().GetRawData();
        // do not modify this log to INFO, this will call many times
        LOG_ECMA(DEBUG) << "start to execute aot entry: " << entryPoint;
        res = thread->GetEcmaVM()->FastCallAot(actualNumArgs, args.data(), prevFp);
    } else {
        size_t argsNum = actualNumArgs + NUM_MANDATORY_JSFUNC_ARGS;
        std::vector<JSTaggedType> args(argsNum, JSTaggedValue::Undefined().GetRawData());
        args[0] = mainFunc.GetTaggedValue().GetRawData();
        // args[1] (new target) intentionally stays undefined for a normal call.
        args[2] = thisArg.GetTaggedValue().GetRawData(); // 2: this
        // do not modify this log to INFO, this will call many times
        LOG_ECMA(DEBUG) << "start to execute aot entry: " << entryPoint;
        res = thread->GetEcmaVM()->ExecuteAot(actualNumArgs, args.data(), prevFp);
    }
    if (thread->HasPendingException()) {
        return thread->GetException();
    }
    return res;
}
// [[Construct]]
JSTaggedValue JSFunction::ConstructInternal(EcmaRuntimeCallInfo *info)
{
@ -338,15 +377,21 @@ JSTaggedValue JSFunction::ConstructInternal(EcmaRuntimeCallInfo *info)
JSTaggedValue resultValue;
info->SetThis(obj.GetTaggedValue());
Method *method = func->GetCallTarget();
if (method->IsAotWithCallField()) {
if (method->IsAotWithCallField() && func->IsClassConstructor()) {
const JSTaggedType *prevFp = thread->GetLastLeaveFrame();
resultValue = thread->GetEcmaVM()->ExecuteAot(info->GetArgsNumber(), info->GetArgs(), prevFp,
OptimizedEntryFrame::CallType::CALL_NEW);
if (method->IsFastCall()) {
JSTaggedType *stackArgs = info->GetArgs();
stackArgs[1] = stackArgs[0];
resultValue = thread->GetEcmaVM()->FastCallAot(info->GetArgsNumber(), stackArgs + 1, prevFp);
} else {
resultValue = thread->GetEcmaVM()->ExecuteAot(info->GetArgsNumber(), info->GetArgs(), prevFp);
}
const JSTaggedType *curSp = thread->GetCurrentSPFrame();
InterpretedEntryFrame *entryState = InterpretedEntryFrame::GetFrameFromSp(curSp);
JSTaggedType *prevSp = entryState->base.prev;
thread->SetCurrentSPFrame(prevSp);
} else {
method->SetAotCodeBit(false); // if Construct is not ClassConstructor, don't run aot
resultValue = EcmaInterpreter::Execute(info);
}
RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread);

View File

@ -93,6 +93,8 @@ public:
static JSTaggedValue Construct(EcmaRuntimeCallInfo *info);
static JSTaggedValue Invoke(EcmaRuntimeCallInfo *info, const JSHandle<JSTaggedValue> &key);
static JSTaggedValue InvokeOptimizedEntrypoint(JSThread *thread, JSHandle<JSFunction> mainFunc,
JSHandle<JSTaggedValue> &thisArg, std::string_view entryPoint);
// 9.2.2[[Construct]](argumentsList, newTarget)
// 9.3.2[[Construct]](argumentsList, newTarget)
static JSTaggedValue ConstructInternal(EcmaRuntimeCallInfo *info);

View File

@ -117,6 +117,15 @@ public:
return methodLiteralMap_;
}
// Looks up the MethodLiteral registered in methodLiteralMap_ under the given
// index (method id). Returns the non-owning pointer stored in the map, or
// nullptr when no literal has been recorded for that index.
MethodLiteral *GetMethodLiteralByIndex(uint32_t index) const
{
    auto info = methodLiteralMap_.find(index);
    if (info != methodLiteralMap_.end()) {
        return info->second;
    }
    return nullptr;
}
uint32_t GetNumMethods() const
{
return numMethods_;

View File

@ -55,7 +55,7 @@ public:
using IsNativeBit = NumArgsBits::NextFlag; // offset 60
using IsAotCodeBit = IsNativeBit::NextFlag; // offset 61
using IsFastBuiltinBit = IsAotCodeBit::NextFlag; // offset 62
using IsCallNapiBit = IsFastBuiltinBit::NextFlag; // offset 63
using IsFastCallBit = IsFastBuiltinBit::NextFlag; // offset 63
uint64_t GetCallField() const
{
@ -185,14 +185,24 @@ public:
return NumArgsBits::Decode(callField);
}
static uint64_t SetCallNapi(uint64_t callField, bool isCallNapi)
static uint64_t SetIsFastCall(uint64_t callField, bool isFastCall)
{
return IsCallNapiBit::Update(callField, isCallNapi);
return IsFastCallBit::Update(callField, isFastCall);
}
static bool IsCallNapi(uint64_t callField)
void SetIsFastCall(bool isFastCall)
{
return IsCallNapiBit::Decode(callField);
callField_ = IsFastCallBit::Update(callField_, isFastCall);
}
static bool IsFastCall(uint64_t callField)
{
return IsFastCallBit::Decode(callField);
}
bool IsFastCall() const
{
return IsFastCallBit::Decode(callField_);
}
static constexpr size_t METHOD_ARGS_NUM_BITS = 16;

View File

@ -97,14 +97,24 @@ public:
return NumArgsBits::Decode(callField);
}
static uint64_t SetCallNapi(uint64_t callField, bool isCallNapi)
static uint64_t SetCallNapi(uint64_t extraLiteralInfo, bool isCallNapi)
{
return IsCallNapiBit::Update(callField, isCallNapi);
return IsCallNapiBit::Update(extraLiteralInfo, isCallNapi);
}
static bool IsCallNapi(uint64_t callField)
static bool IsCallNapi(uint64_t extraLiteralInfo)
{
return IsCallNapiBit::Decode(callField);
return IsCallNapiBit::Decode(extraLiteralInfo);
}
static uint64_t SetIsFastCall(uint64_t callField, bool isFastCall)
{
return IsFastCallBit::Update(callField, isFastCall);
}
static bool IsFastCall(uint64_t callField)
{
return IsFastCallBit::Decode(callField);
}
void SetNumArgsWithCallField(uint32_t numargs)
@ -313,17 +323,30 @@ public:
return GetBuiltinId(extraLiteralInfo);
}
void SetIsFastCall(bool isFastCall)
{
uint64_t callFiled = GetCallField();
uint64_t newValue = SetIsFastCall(callFiled, isFastCall);
SetCallField(newValue);
}
bool IsFastCall() const
{
uint64_t callFiled = GetCallField();
return IsFastCall(callFiled);
}
void SetCallNapi(bool isCallNapi)
{
uint64_t callField = GetCallField();
uint64_t newValue = MethodLiteral::SetCallNapi(callField, isCallNapi);
SetCallField(newValue);
uint64_t extraLiteralInfo = GetExtraLiteralInfo();
uint64_t newValue = SetCallNapi(extraLiteralInfo, isCallNapi);
SetExtraLiteralInfo(newValue);
}
bool IsCallNapi() const
{
uint64_t callField = GetCallField();
return MethodLiteral::IsCallNapi(callField);
uint64_t extraLiteralInfo = GetExtraLiteralInfo();
return IsCallNapi(extraLiteralInfo);
}
void SetBuiltinId(uint8_t id)
@ -394,6 +417,7 @@ public:
/* callfield */
static constexpr size_t VREGS_ARGS_NUM_BITS = 28; // 28: maximum 268,435,455
static constexpr uint64_t AOT_FASTCALL_BITS = 0x5; // 0x5LU: aot and fastcall bit field
using HaveThisBit = BitField<bool, 0, 1>; // offset 0
using HaveNewTargetBit = HaveThisBit::NextFlag; // offset 1
using HaveExtraBit = HaveNewTargetBit::NextFlag; // offset 2
@ -403,7 +427,7 @@ public:
using IsNativeBit = NumArgsBits::NextFlag; // offset 60
using IsAotCodeBit = IsNativeBit::NextFlag; // offset 61
using IsFastBuiltinBit = IsAotCodeBit::NextFlag; // offset 62
using IsCallNapiBit = IsFastBuiltinBit::NextFlag; // offset 63
using IsFastCallBit = IsFastBuiltinBit::NextFlag; // offset 63
/* ExtraLiteralInfo */
static constexpr size_t BUILTINID_NUM_BITS = 8;
@ -414,6 +438,7 @@ public:
using FunctionKindBits = BuiltinIdBits::NextField<FunctionKind, FUNCTION_KIND_NUM_BITS>; // offset 8-11
using DeoptCountBits = FunctionKindBits::NextField<uint8_t, DEOPT_THRESHOLD_BITS>; // offset 12-19
using DeoptTypeBits = DeoptCountBits::NextField<kungfu::DeoptType, DEOPTTYPE_NUM_BITS>; // offset 20-27
using IsCallNapiBit = DeoptTypeBits::NextFlag; // offset 28
static constexpr size_t CONSTANT_POOL_OFFSET = TaggedObjectSize();
ACCESSORS(ConstantPool, CONSTANT_POOL_OFFSET, PROFILE_TYPE_INFO_OFFSET)

View File

@ -2467,11 +2467,17 @@ JSTaggedValue RuntimeStubs::RuntimeOptConstructGeneric(JSThread *thread, JSHandl
CVector<JSTaggedType> values;
Method *method = ctor->GetCallTarget();
bool isAotMethod = method->IsAotWithCallField();
if (isAotMethod) {
values.reserve(size + NUM_MANDATORY_JSFUNC_ARGS);
values.emplace_back(ctor.GetTaggedValue().GetRawData());
values.emplace_back(newTgt.GetTaggedValue().GetRawData());
values.emplace_back(obj.GetTaggedValue().GetRawData());
if (isAotMethod && ctor->IsClassConstructor()) {
if (method->IsFastCall()) {
values.reserve(size + NUM_MANDATORY_JSFUNC_ARGS - 1);
values.emplace_back(ctor.GetTaggedValue().GetRawData());
values.emplace_back(obj.GetTaggedValue().GetRawData());
} else {
values.reserve(size + NUM_MANDATORY_JSFUNC_ARGS);
values.emplace_back(ctor.GetTaggedValue().GetRawData());
values.emplace_back(newTgt.GetTaggedValue().GetRawData());
values.emplace_back(obj.GetTaggedValue().GetRawData());
}
} else {
values.reserve(size);
}
@ -2491,11 +2497,15 @@ JSTaggedValue RuntimeStubs::RuntimeOptConstructGeneric(JSThread *thread, JSHandl
}
}
JSTaggedValue resultValue;
if (isAotMethod) {
if (isAotMethod && ctor->IsClassConstructor()) {
const JSTaggedType *prevFp = thread->GetLastLeaveFrame();
resultValue =
thread->GetEcmaVM()->ExecuteAot(size, values.data(), prevFp, OptimizedEntryFrame::CallType::CALL_NEW);
if (ctor->GetCallTarget()->IsFastCall()) {
resultValue = thread->GetEcmaVM()->FastCallAot(size, values.data(), prevFp);
} else {
resultValue = thread->GetEcmaVM()->ExecuteAot(size, values.data(), prevFp);
}
} else {
ctor->GetCallTarget()->SetAotCodeBit(false); // if Construct is not ClassConstructor, don't run aot
EcmaRuntimeCallInfo *info =
EcmaInterpreter::NewRuntimeCallInfo(thread, JSHandle<JSTaggedValue>(ctor), obj, newTgt, size);
RETURN_EXCEPTION_IF_ABRUPT_COMPLETION(thread);

View File

@ -38,10 +38,13 @@ class GeneratorContext;
struct EcmaRuntimeCallInfo;
using JSFunctionEntryType = JSTaggedValue (*)(uintptr_t glue, uint32_t argc, const JSTaggedType argV[],
uintptr_t prevFp, size_t callType);
uintptr_t prevFp);
using FastCallAotEntryType = JSTaggedValue (*)(uintptr_t glue, uint32_t argc, const JSTaggedType argV[],
uintptr_t prevFp);
#define RUNTIME_ASM_STUB_LIST(V) \
JS_CALL_TRAMPOLINE_LIST(V) \
FAST_CALL_TRAMPOLINE_LIST(V) \
ASM_INTERPRETER_TRAMPOLINE_LIST(V)
#define ASM_INTERPRETER_TRAMPOLINE_LIST(V) \
@ -76,11 +79,18 @@ using JSFunctionEntryType = JSTaggedValue (*)(uintptr_t glue, uint32_t argc, con
V(JSFunctionEntry) \
V(JSCall) \
V(JSCallWithArgV) \
V(JSCallWithArgVAndPushUndefined) \
V(JSProxyCallInternalWithArgV) \
V(OptimizedCallOptimized) \
V(OptimizedCallAndPushUndefined) \
V(DeoptHandlerAsm) \
V(JSCallNew) \
V(JSCallNewWithArgV)
V(CallOptimized)
#define FAST_CALL_TRAMPOLINE_LIST(V) \
V(OptimizedFastCallEntry) \
V(OptimizedFastCallAndPushUndefined) \
V(JSFastCallWithArgV) \
V(JSFastCallWithArgVAndPushUndefined)
#define RUNTIME_STUB_WITHOUT_GC_LIST(V) \

View File

@ -501,6 +501,39 @@ bool TSManager::IsMethodSignature(GlobalTSTypeRef gt) const
return functionType->GetIsSignature();
}
// Returns true when the TS function type identified by |gt| carries the
// IsFastCall flag (i.e. was marked eligible for the AOT fast-call convention
// during type parsing). Non-function type kinds always return false.
bool TSManager::CanFastCall(GlobalTSTypeRef gt) const
{
    if (!IsFunctionTypeKind(gt)) {
        return false;
    }
    JSHandle<JSTaggedValue> tsType = GetTSType(gt);
    ASSERT(tsType->IsTSFunctionType());
    JSHandle<TSFunctionType> functionType(tsType);
    return functionType->GetIsFastCall();
}
// Returns whether the method offset recorded on the TS function type |gt| is
// valid (set by the type parser when the offset could be resolved).
// Non-function type kinds always return false.
// NOTE: "Vaild" is a pre-existing misspelling of "Valid" shared across the
// related APIs; kept here for naming consistency.
bool TSManager::MethodOffsetIsVaild(GlobalTSTypeRef gt) const
{
    if (!IsFunctionTypeKind(gt)) {
        return false;
    }
    JSHandle<JSTaggedValue> tsType = GetTSType(gt);
    ASSERT(tsType->IsTSFunctionType());
    JSHandle<TSFunctionType> functionType(tsType);
    return functionType->GetIsMethodOffsetVaild();
}
// Returns whether the fast-call flag stored on the TS function type |gt| can
// be trusted (the parser marks it invalid when the method offset could not be
// resolved). Non-function type kinds always return false.
// NOTE: "Vaild" is a pre-existing misspelling of "Valid" shared across the
// related APIs; kept here for naming consistency.
bool TSManager::FastCallFlagIsVaild(GlobalTSTypeRef gt) const
{
    if (!IsFunctionTypeKind(gt)) {
        return false;
    }
    JSHandle<JSTaggedValue> tsType = GetTSType(gt);
    ASSERT(tsType->IsTSFunctionType());
    JSHandle<TSFunctionType> functionType(tsType);
    return functionType->GetIsFastCallVaild();
}
GlobalTSTypeRef TSManager::GetFuncReturnValueTypeGT(GlobalTSTypeRef gt) const
{
ASSERT(IsFunctionTypeKind(gt));

View File

@ -286,6 +286,9 @@ public:
bool IsAbstractMethod(GlobalTSTypeRef gt) const;
bool IsMethodSignature(GlobalTSTypeRef gt) const;
bool CanFastCall(GlobalTSTypeRef gt) const;
bool MethodOffsetIsVaild(GlobalTSTypeRef gt) const;
bool FastCallFlagIsVaild(GlobalTSTypeRef gt) const;
inline GlobalTSTypeRef PUBLIC_API GetFuncReturnValueTypeGT(kungfu::GateType gateType) const
{

View File

@ -204,6 +204,9 @@ public:
NEXT_BIT_FIELD(BitField, IsGetterSetter, bool, ONE_BIT, Generator);
NEXT_BIT_FIELD(BitField, IsAbstract, bool, ONE_BIT, IsGetterSetter);
NEXT_BIT_FIELD(BitField, IsSignature, bool, ONE_BIT, IsAbstract);
NEXT_BIT_FIELD(BitField, IsFastCall, bool, ONE_BIT, IsSignature);
NEXT_BIT_FIELD(BitField, IsFastCallVaild, bool, ONE_BIT, IsFastCall);
NEXT_BIT_FIELD(BitField, IsMethodOffsetVaild, bool, ONE_BIT, IsFastCallVaild);
DECL_VISIT_OBJECT(NAME_OFFSET, RETURN_GT_OFFSET)
DECL_DUMP()

View File

@ -510,6 +510,14 @@ void TSTypeParser::StoreMethodOffset(const JSHandle<TSFunctionType> &functionTyp
uint32_t methodOffset = bcInfo_->IterateFunctionTypeIDAndMethodOffset(typeOffset);
if (methodOffset != 0) {
functionType->SetMethodOffset(methodOffset);
functionType->SetIsMethodOffsetVaild(true);
bool isVaild;
bool canFastCall = bcInfo_->IterateMethodOffsetToCanFastCall(methodOffset, &isVaild);
functionType->SetIsFastCallVaild(isVaild);
functionType->SetIsFastCall(canFastCall);
} else {
functionType->SetIsMethodOffsetVaild(false);
functionType->SetIsFastCallVaild(false);
}
}
}

View File

@ -13,7 +13,7 @@
* limitations under the License.
*/
declare function print(str: any): string;
declare var ArkTools:any;
function foo() {
return "pass";
}
@ -41,3 +41,124 @@ print(foo1(1));
print(foo2(1, 2));
print(foo3(1, 2, 3));
print(foo4(1, 2, 3, 4));
// AOT test fixture: a class whose constructor prints its argument and
// new.target.name, then mutates `mode`. update() exercises an instance-method
// call through the optimized (fast-call) path. Output is pinned by the
// expected-output file; do not change any print.
class A {
    public mode: number = 1;
    constructor(dt: number) {
        print(dt);
        print(new.target.name);
        const size = 50;  // unused local, kept deliberately in the fixture
        this.mode = 4;
    }
    update (dt: number, dt1: number): number {
        print(dt);
        print(dt1);
        return dt + dt1 + this.mode;
    }
}
// AOT test fixture: three-parameter constructor; callers intentionally pass
// fewer arguments so the missing ones print as `undefined` (verifies
// undefined-padding in the optimized call path). Output is pinned by the
// expected-output file.
class B {
    public mode: number = 1;
    constructor(dt: number, dt1: number, dt2: number) {
        print(dt);
        print(dt1);
        print(dt2);
        print(new.target.name);
        const size = 50;  // unused local, kept deliberately in the fixture
        this.mode = 4;
    }
}
// AOT test fixture: eight-parameter constructor that prints a subset of its
// parameters; exercises argument passing beyond the register-argument count
// (stack arguments) in the optimized call path — TODO confirm the exact
// register/stack split against the trampoline implementation. Output is
// pinned by the expected-output file.
class C {
    public mode: number = 1;
    constructor(dt: number, dt1: number, dt2: number, dt3: number, dt4: number, dt5: number, dt6: number, dt7: number) {
        print(new.target.name);
        print(dt);
        print(dt1);
        print(dt4);
        print(dt6);
        print(dt7);
        const size = 50;  // unused local, kept deliberately in the fixture
        this.mode = 4;
    }
}
// AOT test fixture: eight-parameter function printing all parameters; called
// with fewer arguments so trailing ones print as `undefined`.
function funcv(value: number, value1: number, value2: number, value3: number, value4: number, value5: number, value6: number, value7: number): number {
    print(value);
    print(value1);
    print(value2);
    print(value3);
    print(value4);
    print(value5);
    print(value6);
    print(value7);
    return 100;
}
// AOT test fixture: zero-argument function returning a constant.
function func0(): number {
    return 110;
}
// AOT test fixture: one-parameter identity that prints its argument; also
// called with no argument to verify `undefined` padding.
function func1(value: number): number {
    print(value);
    return value;
}
// AOT test fixture: two-parameter function printing both; callers deliberately
// under-supply arguments.
function func2(value: number, value1: number): number {
    print(value);
    print(value1);
    return value;
}
// AOT test fixture: three-parameter function that also performs a nested
// AOT-to-AOT call (func1) from within the optimized call path.
function func3(value: number, value1: number, value2: number): number {
    print(value);
    print(value1);
    print(value2);
    func1(value);
    return value;
}
// AOT test fixture: four-parameter function printing all parameters; callers
// deliberately under-supply arguments.
function func4(value: number, value1: number, value2: number, value3: number): number {
    print(value);
    print(value1);
    print(value2);
    print(value3);
    return value;
}
// AOT test fixture: invoked with `new` below so new.target.name prints the
// function's own name, verifying new-target handling in the construct path.
function testNewTarget(value: number): number {
    print(new.target.name);
    return value;
}
// Driver: each call below deliberately mismatches the declared arity (missing
// args print as `undefined`, extras are ignored) to exercise argument
// padding/truncation in the AOT fast-call and construct conventions. The
// printed sequence is pinned by the expected-output file.
var systems: A = new A(1);
var systems1: B = new B(2, 3);            // dt2 missing -> undefined
var systems2: C = new C(3, 4, 5, 6, 7, 8); // dt6/dt7 missing -> undefined
print(func0());
func1();                                   // value missing -> undefined
func2(1);                                  // value1 missing -> undefined
func3("mytest", 2);                        // string where number declared, value2 missing
func4(3, 4, 5);                            // value3 missing -> undefined
funcv(6, 7 , 8, 9);                        // value4..value7 missing -> undefined
systems.update(4);                         // dt1 missing -> undefined
var k = new testNewTarget(1);              // construct path: new.target is testNewTarget
// AOT test fixture: repeats the arity-mismatch calls and constructions from
// inside a function body (rather than top level), exercising the same call
// conventions from a nested frame. Output is pinned by the expected-output
// file; do not change any call site.
function funcAsm(value: number, value1: number, value2: number): number {
    print(value);
    print(value1);
    print(value2);
    func2(value1, value2);
    func3(value1, value2);          // value2 param missing -> undefined
    func4(value1);                  // three params missing -> undefined
    funcv(value, value1, value2, value); // four params missing -> undefined
    var s: A = new A(1, 4);         // extra argument is ignored
    var s1: B = new B(2, 3);        // dt2 missing -> undefined
    var s2: C = new C(3, 4, 5, 6, 7, 8);       // dt6/dt7 missing
    var s3: C = new C(3, 4, 5, 6, 7, 8, 9, 10); // exact arity
    return value;
}
// ArkTools is a runtime-internal test helper; removeAOTFlag presumably clears
// the AOT-code flag so funcAsm runs through the non-AOT (interpreter) path —
// TODO confirm against the ArkTools implementation. funcAsm is then invoked
// one argument short of its declared arity.
ArkTools.removeAOTFlag(funcAsm);
funcAsm(1, 2);

View File

@ -18,3 +18,77 @@ pass
3
6
10
1
A
2
3
undefined
B
C
3
4
7
undefined
undefined
110
undefined
1
undefined
mytest
2
undefined
mytest
3
4
5
undefined
6
7
8
9
undefined
undefined
undefined
undefined
4
undefined
testNewTarget
1
2
undefined
2
undefined
2
undefined
undefined
2
2
undefined
undefined
undefined
1
2
undefined
1
undefined
undefined
undefined
undefined
1
A
2
3
undefined
B
C
3
4
7
undefined
undefined
C
3
4
7
9
10