Backed out changeset 502f442198b6 (bug 1360248) for build bustage in WasmSignalHandlers.cpp, at least on Windows 2012. r=backout on a CLOSED TREE

Sebastian Hengst 2017-05-10 20:22:11 +02:00
parent e6d07c36b9
commit 67fb8912b0


@@ -71,151 +71,6 @@ StackDecrementForCall(MacroAssembler& masm, uint32_t alignment, const VectorT& a
return StackDecrementForCall(masm, alignment, StackArgBytes(args) + extraBytes);
}
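
For reference, the rounding StackDecrementForCall performs comes down to the standard power-of-two alignment formula. A minimal stand-alone sketch, with the helper name and the framePushed bookkeeping as assumptions rather than SpiderMonkey's exact code:

#include <cstddef>

// Round (framePushed + bytesToPush) up to a multiple of alignment (a power
// of two), then return how far the stack pointer must drop from its current
// position so the call site ends up aligned.
static size_t StackDecrementSketch(size_t framePushed, size_t bytesToPush, size_t alignment) {
    size_t total = framePushed + bytesToPush;
    size_t aligned = (total + alignment - 1) & ~(alignment - 1);
    return aligned - framePushed;
}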
static void
SetupABIArguments(MacroAssembler& masm, const FuncExport& fe, Register argv, Register scratch)
{
// Copy parameters out of argv and into the registers/stack-slots specified by
// the system ABI.
for (ABIArgValTypeIter iter(fe.sig().args()); !iter.done(); iter++) {
unsigned argOffset = iter.index() * sizeof(ExportArg);
Address src(argv, argOffset);
MIRType type = iter.mirType();
switch (iter->kind()) {
case ABIArg::GPR:
if (type == MIRType::Int32)
masm.load32(src, iter->gpr());
else if (type == MIRType::Int64)
masm.load64(src, iter->gpr64());
break;
#ifdef JS_CODEGEN_REGISTER_PAIR
case ABIArg::GPR_PAIR:
if (type == MIRType::Int64)
masm.load64(src, iter->gpr64());
else
MOZ_CRASH("wasm uses hardfp for function calls.");
break;
#endif
case ABIArg::FPU: {
static_assert(sizeof(ExportArg) >= jit::Simd128DataSize,
"ExportArg must be big enough to store SIMD values");
switch (type) {
case MIRType::Int8x16:
case MIRType::Int16x8:
case MIRType::Int32x4:
case MIRType::Bool8x16:
case MIRType::Bool16x8:
case MIRType::Bool32x4:
masm.loadUnalignedSimd128Int(src, iter->fpu());
break;
case MIRType::Float32x4:
masm.loadUnalignedSimd128Float(src, iter->fpu());
break;
case MIRType::Double:
masm.loadDouble(src, iter->fpu());
break;
case MIRType::Float32:
masm.loadFloat32(src, iter->fpu());
break;
default:
MOZ_MAKE_COMPILER_ASSUME_IS_UNREACHABLE("unexpected FPU type");
break;
}
break;
}
case ABIArg::Stack:
switch (type) {
case MIRType::Int32:
masm.load32(src, scratch);
masm.storePtr(scratch, Address(masm.getStackPointer(), iter->offsetFromArgBase()));
break;
case MIRType::Int64: {
Register sp = masm.getStackPointer();
#if JS_BITS_PER_WORD == 32
masm.load32(Address(src.base, src.offset + INT64LOW_OFFSET), scratch);
masm.store32(scratch, Address(sp, iter->offsetFromArgBase() + INT64LOW_OFFSET));
masm.load32(Address(src.base, src.offset + INT64HIGH_OFFSET), scratch);
masm.store32(scratch, Address(sp, iter->offsetFromArgBase() + INT64HIGH_OFFSET));
#else
Register64 scratch64(scratch);
masm.load64(src, scratch64);
masm.store64(scratch64, Address(sp, iter->offsetFromArgBase()));
#endif
break;
}
case MIRType::Double:
masm.loadDouble(src, ScratchDoubleReg);
masm.storeDouble(ScratchDoubleReg,
Address(masm.getStackPointer(), iter->offsetFromArgBase()));
break;
case MIRType::Float32:
masm.loadFloat32(src, ScratchFloat32Reg);
masm.storeFloat32(ScratchFloat32Reg,
Address(masm.getStackPointer(), iter->offsetFromArgBase()));
break;
case MIRType::Int8x16:
case MIRType::Int16x8:
case MIRType::Int32x4:
case MIRType::Bool8x16:
case MIRType::Bool16x8:
case MIRType::Bool32x4:
masm.loadUnalignedSimd128Int(src, ScratchSimd128Reg);
masm.storeAlignedSimd128Int(
ScratchSimd128Reg, Address(masm.getStackPointer(), iter->offsetFromArgBase()));
break;
case MIRType::Float32x4:
masm.loadUnalignedSimd128Float(src, ScratchSimd128Reg);
masm.storeAlignedSimd128Float(
ScratchSimd128Reg, Address(masm.getStackPointer(), iter->offsetFromArgBase()));
break;
default:
MOZ_MAKE_COMPILER_ASSUME_IS_UNREACHABLE("unexpected stack arg type");
}
break;
}
}
}
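
To make the argv layout SetupABIArguments consumes concrete, here is a hedged caller-side sketch. ExportArgSlot and PackArgs are illustrative stand-ins, not SpiderMonkey types; the only property the stub relies on is the fixed slot size (static_assert'ed above to hold a 128-bit SIMD value), so argument i lives at argv + i * sizeof(ExportArg).

#include <cstdint>
#include <cstring>

// Hypothetical stand-in for ExportArg: one fixed-size slot per argument.
struct ExportArgSlot { uint8_t bytes[16]; };

// Pack an (i32, f64) argument list the way the stub reads it back:
// masm.load32 from slot 0, masm.loadDouble from slot 1.
void PackArgs(ExportArgSlot* argv, int32_t a0, double a1) {
    std::memcpy(argv[0].bytes, &a0, sizeof a0);
    std::memcpy(argv[1].bytes, &a1, sizeof a1);
}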
static void
StoreABIReturn(MacroAssembler& masm, const FuncExport& fe, Register argv)
{
// Store the return value in argv[0]
switch (fe.sig().ret()) {
case ExprType::Void:
break;
case ExprType::I32:
masm.store32(ReturnReg, Address(argv, 0));
break;
case ExprType::I64:
masm.store64(ReturnReg64, Address(argv, 0));
break;
case ExprType::F32:
if (!JitOptions.wasmTestMode)
masm.canonicalizeFloat(ReturnFloat32Reg);
masm.storeFloat32(ReturnFloat32Reg, Address(argv, 0));
break;
case ExprType::F64:
if (!JitOptions.wasmTestMode)
masm.canonicalizeDouble(ReturnDoubleReg);
masm.storeDouble(ReturnDoubleReg, Address(argv, 0));
break;
case ExprType::I8x16:
case ExprType::I16x8:
case ExprType::I32x4:
case ExprType::B8x16:
case ExprType::B16x8:
case ExprType::B32x4:
// We don't have control over argv's alignment, so do an unaligned access.
masm.storeUnalignedSimd128Int(ReturnSimd128Reg, Address(argv, 0));
break;
case ExprType::F32x4:
// We don't have control over argv's alignment, so do an unaligned access.
masm.storeUnalignedSimd128Float(ReturnSimd128Reg, Address(argv, 0));
break;
case ExprType::Limit:
MOZ_CRASH("Limit");
}
}
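
The canonicalizeFloat/canonicalizeDouble calls above deserve a gloss: before a float result becomes visible to script, every NaN bit pattern is collapsed to one canonical NaN, so JS cannot observe platform-specific NaN payloads. A scalar analogue, with quiet_NaN() standing in for the engine's canonical value:

#include <cmath>
#include <limits>

// Collapse any NaN to a single canonical bit pattern; pass other values through.
double CanonicalizeDoubleSketch(double d) {
    return std::isnan(d) ? std::numeric_limits<double>::quiet_NaN() : d;
}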
#if defined(JS_CODEGEN_ARM)
// The ARM system ABI also includes d15 & s31 in the non-volatile float registers.
// Also exclude lr (a.k.a. r14), as we preserve it manually.
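
The adjustment that comment describes is plain mask arithmetic. A hedged sketch; the function and parameter names are hypothetical, not the identifiers WasmStubs.cpp uses:

#include <cstdint>

uint32_t NonVolatileGprMask(uint32_t abiMask, uint32_t lrBit) {
    return abiMask & ~lrBit;               // lr is preserved manually instead
}
uint64_t NonVolatileFprMask(uint64_t abiMask, uint64_t d15Bit, uint64_t s31Bit) {
    return abiMask | d15Bit | s31Bit;      // ARM additionally keeps d15/s31 non-volatile
}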
@@ -316,8 +171,105 @@ wasm::GenerateEntry(MacroAssembler& masm, const FuncExport& fe)
// Bump the stack for the call.
masm.reserveStack(AlignBytes(StackArgBytes(fe.sig().args()), WasmStackAlignment));
// Copy parameters out of argv and into the wasm ABI registers/stack-slots.
SetupABIArguments(masm, fe, argv, scratch);
// Copy parameters out of argv and into the registers/stack-slots specified by
// the system ABI.
for (ABIArgValTypeIter iter(fe.sig().args()); !iter.done(); iter++) {
unsigned argOffset = iter.index() * sizeof(ExportArg);
Address src(argv, argOffset);
MIRType type = iter.mirType();
switch (iter->kind()) {
case ABIArg::GPR:
if (type == MIRType::Int32)
masm.load32(src, iter->gpr());
else if (type == MIRType::Int64)
masm.load64(src, iter->gpr64());
break;
#ifdef JS_CODEGEN_REGISTER_PAIR
case ABIArg::GPR_PAIR:
if (type == MIRType::Int64)
masm.load64(src, iter->gpr64());
else
MOZ_CRASH("wasm uses hardfp for function calls.");
break;
#endif
case ABIArg::FPU: {
static_assert(sizeof(ExportArg) >= jit::Simd128DataSize,
"ExportArg must be big enough to store SIMD values");
switch (type) {
case MIRType::Int8x16:
case MIRType::Int16x8:
case MIRType::Int32x4:
case MIRType::Bool8x16:
case MIRType::Bool16x8:
case MIRType::Bool32x4:
masm.loadUnalignedSimd128Int(src, iter->fpu());
break;
case MIRType::Float32x4:
masm.loadUnalignedSimd128Float(src, iter->fpu());
break;
case MIRType::Double:
masm.loadDouble(src, iter->fpu());
break;
case MIRType::Float32:
masm.loadFloat32(src, iter->fpu());
break;
default:
MOZ_MAKE_COMPILER_ASSUME_IS_UNREACHABLE("unexpected FPU type");
break;
}
break;
}
case ABIArg::Stack:
switch (type) {
case MIRType::Int32:
masm.load32(src, scratch);
masm.storePtr(scratch, Address(masm.getStackPointer(), iter->offsetFromArgBase()));
break;
case MIRType::Int64: {
Register sp = masm.getStackPointer();
#if JS_BITS_PER_WORD == 32
masm.load32(Address(src.base, src.offset + INT64LOW_OFFSET), scratch);
masm.store32(scratch, Address(sp, iter->offsetFromArgBase() + INT64LOW_OFFSET));
masm.load32(Address(src.base, src.offset + INT64HIGH_OFFSET), scratch);
masm.store32(scratch, Address(sp, iter->offsetFromArgBase() + INT64HIGH_OFFSET));
#else
Register64 scratch64(scratch);
masm.load64(src, scratch64);
masm.store64(scratch64, Address(sp, iter->offsetFromArgBase()));
#endif
break;
}
case MIRType::Double:
masm.loadDouble(src, ScratchDoubleReg);
masm.storeDouble(ScratchDoubleReg,
Address(masm.getStackPointer(), iter->offsetFromArgBase()));
break;
case MIRType::Float32:
masm.loadFloat32(src, ScratchFloat32Reg);
masm.storeFloat32(ScratchFloat32Reg,
Address(masm.getStackPointer(), iter->offsetFromArgBase()));
break;
case MIRType::Int8x16:
case MIRType::Int16x8:
case MIRType::Int32x4:
case MIRType::Bool8x16:
case MIRType::Bool16x8:
case MIRType::Bool32x4:
masm.loadUnalignedSimd128Int(src, ScratchSimd128Reg);
masm.storeAlignedSimd128Int(
ScratchSimd128Reg, Address(masm.getStackPointer(), iter->offsetFromArgBase()));
break;
case MIRType::Float32x4:
masm.loadUnalignedSimd128Float(src, ScratchSimd128Reg);
masm.storeAlignedSimd128Float(
ScratchSimd128Reg, Address(masm.getStackPointer(), iter->offsetFromArgBase()));
break;
default:
MOZ_MAKE_COMPILER_ASSUME_IS_UNREACHABLE("unexpected stack arg type");
}
break;
}
}
// Set the FramePointer to null for the benefit of debugging.
masm.movePtr(ImmWord(0), FramePointer);
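
The Stack/Int64 case above is the one 32-bit-specific wrinkle: with JS_BITS_PER_WORD == 32 the 64-bit argument moves as two 32-bit words. A portable analogue; the hard-coded 0/4 offsets assume little-endian, whereas the real INT64LOW_OFFSET/INT64HIGH_OFFSET macros account for endianness:

#include <cstdint>
#include <cstring>

void CopyInt64AsWordsSketch(const uint8_t* src, uint8_t* dst) {
    uint32_t lo, hi;
    std::memcpy(&lo, src + 0, sizeof lo);  // low word (INT64LOW_OFFSET on little-endian)
    std::memcpy(&hi, src + 4, sizeof hi);  // high word (INT64HIGH_OFFSET on little-endian)
    std::memcpy(dst + 0, &lo, sizeof lo);
    std::memcpy(dst + 4, &hi, sizeof hi);
}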
@@ -344,8 +296,42 @@ wasm::GenerateEntry(MacroAssembler& masm, const FuncExport& fe)
// Recover the 'argv' pointer which was saved before aligning the stack.
masm.Pop(argv);
// Store the return value into argv.
StoreABIReturn(masm, fe, argv);
// Store the return value in argv[0]
switch (fe.sig().ret()) {
case ExprType::Void:
break;
case ExprType::I32:
masm.store32(ReturnReg, Address(argv, 0));
break;
case ExprType::I64:
masm.store64(ReturnReg64, Address(argv, 0));
break;
case ExprType::F32:
if (!JitOptions.wasmTestMode)
masm.canonicalizeFloat(ReturnFloat32Reg);
masm.storeFloat32(ReturnFloat32Reg, Address(argv, 0));
break;
case ExprType::F64:
if (!JitOptions.wasmTestMode)
masm.canonicalizeDouble(ReturnDoubleReg);
masm.storeDouble(ReturnDoubleReg, Address(argv, 0));
break;
case ExprType::I8x16:
case ExprType::I16x8:
case ExprType::I32x4:
case ExprType::B8x16:
case ExprType::B16x8:
case ExprType::B32x4:
// We don't have control over argv's alignment, so do an unaligned access.
masm.storeUnalignedSimd128Int(ReturnSimd128Reg, Address(argv, 0));
break;
case ExprType::F32x4:
// We don't have control over argv's alignment, so do an unaligned access.
masm.storeUnalignedSimd128Float(ReturnSimd128Reg, Address(argv, 0));
break;
case ExprType::Limit:
MOZ_CRASH("Limit");
}
// Restore clobbered non-volatile registers of the caller.
masm.PopRegsInMask(NonVolatileRegs);
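
Finally, the reason the SIMD returns above use storeUnalignedSimd128Int/Float rather than the aligned variants: the caller's argv buffer carries no 16-byte alignment guarantee, so the stub must write alignment-agnostically. The portable equivalent is a plain memcpy:

#include <cstdint>
#include <cstring>

// Alignment-agnostic 128-bit store into argv; memcpy imposes no alignment requirement.
void StoreSimd128UnalignedSketch(uint8_t* argv, const uint8_t (&value)[16]) {
    std::memcpy(argv, value, sizeof value);
}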