//===-- WebAssemblyFastISel.cpp - WebAssembly FastISel implementation -----===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
///
/// \file
/// \brief This file defines the WebAssembly-specific support for the FastISel
/// class. Some of the target-specific code is generated by tablegen in the
/// file WebAssemblyGenFastISel.inc, which is #included here.
///
//===----------------------------------------------------------------------===//

#include "WebAssembly.h"
#include "MCTargetDesc/WebAssemblyMCTargetDesc.h"
#include "WebAssemblySubtarget.h"
#include "WebAssemblyTargetMachine.h"
#include "llvm/Analysis/BranchProbabilityInfo.h"
#include "llvm/CodeGen/FastISel.h"
#include "llvm/CodeGen/FunctionLoweringInfo.h"
#include "llvm/CodeGen/MachineConstantPool.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Operator.h"
using namespace llvm;

#define DEBUG_TYPE "wasm-fastisel"

namespace {

class WebAssemblyFastISel final : public FastISel {
  // All possible address modes.
  class Address {
  public:
    typedef enum { RegBase, FrameIndexBase } BaseKind;

  private:
    BaseKind Kind;
    union {
      unsigned Reg;
      int FI;
    } Base;

    int64_t Offset;

    const GlobalValue *GV;

  public:
    // Innocuous defaults for our address.
    Address() : Kind(RegBase), Offset(0), GV(0) { Base.Reg = 0; }
    void setKind(BaseKind K) { Kind = K; }
    BaseKind getKind() const { return Kind; }
    bool isRegBase() const { return Kind == RegBase; }
    bool isFIBase() const { return Kind == FrameIndexBase; }
    void setReg(unsigned Reg) {
      assert(isRegBase() && "Invalid base register access!");
      Base.Reg = Reg;
    }
    unsigned getReg() const {
      assert(isRegBase() && "Invalid base register access!");
      return Base.Reg;
    }
    void setFI(unsigned FI) {
      assert(isFIBase() && "Invalid base frame index access!");
      Base.FI = FI;
    }
    unsigned getFI() const {
      assert(isFIBase() && "Invalid base frame index access!");
      return Base.FI;
    }

    void setOffset(int64_t Offset_) { Offset = Offset_; }
    int64_t getOffset() const { return Offset; }
    void setGlobalValue(const GlobalValue *G) { GV = G; }
    const GlobalValue *getGlobalValue() const { return GV; }
  };

  /// Keep a pointer to the WebAssemblySubtarget around so that we can make the
  /// right decision when generating code for different targets.
  const WebAssemblySubtarget *Subtarget;
  LLVMContext *Context;

private:
  // Utility helper routines
  MVT::SimpleValueType getSimpleType(Type *Ty) {
    EVT VT = TLI.getValueType(DL, Ty, /*HandleUnknown=*/true);
    return VT.isSimple() ? VT.getSimpleVT().SimpleTy :
                           MVT::INVALID_SIMPLE_VALUE_TYPE;
  }
  MVT::SimpleValueType getLegalType(MVT::SimpleValueType VT) {
    switch (VT) {
    case MVT::i1:
    case MVT::i8:
    case MVT::i16:
    case MVT::i32:
      return MVT::i32;
    case MVT::i64:
      return MVT::i64;
    default:
      break;
    }
    return MVT::INVALID_SIMPLE_VALUE_TYPE;
  }
  bool computeAddress(const Value *Obj, Address &Addr);
  void materializeLoadStoreOperands(Address &Addr);
  void addLoadStoreOperands(const Address &Addr, const MachineInstrBuilder &MIB,
                            MachineMemOperand *MMO);
  unsigned maskI1Value(unsigned Reg, const Value *V);
  unsigned getRegForI1Value(const Value *V);
  unsigned zeroExtendToI32(unsigned Reg, const Value *V,
                           MVT::SimpleValueType From);
  unsigned signExtendToI32(unsigned Reg, const Value *V,
                           MVT::SimpleValueType From);
  unsigned zeroExtend(unsigned Reg, const Value *V,
                      MVT::SimpleValueType From,
                      MVT::SimpleValueType To);
  unsigned signExtend(unsigned Reg, const Value *V,
                      MVT::SimpleValueType From,
                      MVT::SimpleValueType To);
  unsigned getRegForUnsignedValue(const Value *V);
  unsigned getRegForSignedValue(const Value *V);
  unsigned getRegForPromotedValue(const Value *V, bool IsSigned);
  unsigned notValue(unsigned Reg);

  // Backend specific FastISel code.
  unsigned fastMaterializeAlloca(const AllocaInst *AI) override;
  unsigned fastMaterializeConstant(const Constant *C) override;

  // Selection routines.
  bool selectZExt(const Instruction *I);
  bool selectSExt(const Instruction *I);
  bool selectICmp(const Instruction *I);
  bool selectFCmp(const Instruction *I);
  bool selectBitCast(const Instruction *I);
  bool selectLoad(const Instruction *I);
  bool selectStore(const Instruction *I);
  bool selectBr(const Instruction *I);
  bool selectRet(const Instruction *I);
  bool selectUnreachable(const Instruction *I);

public:
  // Backend specific FastISel code.
  WebAssemblyFastISel(FunctionLoweringInfo &FuncInfo,
                      const TargetLibraryInfo *LibInfo)
      : FastISel(FuncInfo, LibInfo, /*SkipTargetIndependentISel=*/true) {
    Subtarget = &FuncInfo.MF->getSubtarget<WebAssemblySubtarget>();
    Context = &FuncInfo.Fn->getContext();
  }

  bool fastSelectInstruction(const Instruction *I) override;

#include "WebAssemblyGenFastISel.inc"
};

} // end anonymous namespace

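// Compute an Address for Obj, folding constant offsets from GEPs, adds, and
// subs, looking through bitcasts and no-op inttoptr/ptrtoint conversions, and
// using a frame index base for static allocas. Returns false if the address
// can't be expressed in a form this FastISel handles.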
bool WebAssemblyFastISel::computeAddress(const Value *Obj, Address &Addr) {
  const User *U = nullptr;
  unsigned Opcode = Instruction::UserOp1;
  if (const Instruction *I = dyn_cast<Instruction>(Obj)) {
    // Don't walk into other basic blocks unless the object is an alloca from
    // another block, otherwise it may not have a virtual register assigned.
    if (FuncInfo.StaticAllocaMap.count(static_cast<const AllocaInst *>(Obj)) ||
        FuncInfo.MBBMap[I->getParent()] == FuncInfo.MBB) {
      Opcode = I->getOpcode();
      U = I;
    }
  } else if (const ConstantExpr *C = dyn_cast<ConstantExpr>(Obj)) {
    Opcode = C->getOpcode();
    U = C;
  }

  if (auto *Ty = dyn_cast<PointerType>(Obj->getType()))
    if (Ty->getAddressSpace() > 255)
      // Fast instruction selection doesn't support the special
      // address spaces.
      return false;

  if (const GlobalValue *GV = dyn_cast<GlobalValue>(Obj)) {
    if (Addr.getGlobalValue())
      return false;
    Addr.setGlobalValue(GV);
    return true;
  }

  switch (Opcode) {
  default:
    break;
  case Instruction::BitCast: {
    // Look through bitcasts.
    return computeAddress(U->getOperand(0), Addr);
  }
  case Instruction::IntToPtr: {
    // Look past no-op inttoptrs.
    if (TLI.getValueType(DL, U->getOperand(0)->getType()) ==
        TLI.getPointerTy(DL))
      return computeAddress(U->getOperand(0), Addr);
    break;
  }
  case Instruction::PtrToInt: {
    // Look past no-op ptrtoints.
    if (TLI.getValueType(DL, U->getType()) == TLI.getPointerTy(DL))
      return computeAddress(U->getOperand(0), Addr);
    break;
  }
  case Instruction::GetElementPtr: {
    Address SavedAddr = Addr;
    uint64_t TmpOffset = Addr.getOffset();
    // Iterate through the GEP folding the constants into offsets where
    // we can.
    for (gep_type_iterator GTI = gep_type_begin(U), E = gep_type_end(U);
         GTI != E; ++GTI) {
      const Value *Op = GTI.getOperand();
      if (StructType *STy = dyn_cast<StructType>(*GTI)) {
        const StructLayout *SL = DL.getStructLayout(STy);
        unsigned Idx = cast<ConstantInt>(Op)->getZExtValue();
        TmpOffset += SL->getElementOffset(Idx);
      } else {
        uint64_t S = DL.getTypeAllocSize(GTI.getIndexedType());
        for (;;) {
          if (const ConstantInt *CI = dyn_cast<ConstantInt>(Op)) {
            // Constant-offset addressing.
            TmpOffset += CI->getSExtValue() * S;
            break;
          }
          if (canFoldAddIntoGEP(U, Op)) {
            // A compatible add with a constant operand. Fold the constant.
            ConstantInt *CI =
                cast<ConstantInt>(cast<AddOperator>(Op)->getOperand(1));
            TmpOffset += CI->getSExtValue() * S;
            // Iterate on the other operand.
            Op = cast<AddOperator>(Op)->getOperand(0);
            continue;
          }
          // Unsupported
          goto unsupported_gep;
        }
      }
    }
    // Try to grab the base operand now.
    Addr.setOffset(TmpOffset);
    if (computeAddress(U->getOperand(0), Addr))
      return true;
    // We failed, restore everything and try the other options.
    Addr = SavedAddr;
  unsupported_gep:
    break;
  }
  case Instruction::Alloca: {
    const AllocaInst *AI = cast<AllocaInst>(Obj);
    DenseMap<const AllocaInst *, int>::iterator SI =
        FuncInfo.StaticAllocaMap.find(AI);
    if (SI != FuncInfo.StaticAllocaMap.end()) {
      Addr.setKind(Address::FrameIndexBase);
      Addr.setFI(SI->second);
      return true;
    }
    break;
  }
  case Instruction::Add: {
    // Adds of constants are common and easy enough.
    const Value *LHS = U->getOperand(0);
    const Value *RHS = U->getOperand(1);

    if (isa<ConstantInt>(LHS))
      std::swap(LHS, RHS);

    if (const ConstantInt *CI = dyn_cast<ConstantInt>(RHS)) {
      Addr.setOffset(Addr.getOffset() + CI->getSExtValue());
      return computeAddress(LHS, Addr);
    }

    Address Backup = Addr;
    if (computeAddress(LHS, Addr) && computeAddress(RHS, Addr))
      return true;
    Addr = Backup;

    break;
  }
  case Instruction::Sub: {
    // Subs of constants are common and easy enough.
    const Value *LHS = U->getOperand(0);
    const Value *RHS = U->getOperand(1);

    if (const ConstantInt *CI = dyn_cast<ConstantInt>(RHS)) {
      Addr.setOffset(Addr.getOffset() - CI->getSExtValue());
      return computeAddress(LHS, Addr);
    }
    break;
  }
  }
  Addr.setReg(getRegForValue(Obj));
  return Addr.getReg() != 0;
}

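// If a register-based address has no base register yet, materialize a
// constant zero of pointer width to serve as the base.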
void WebAssemblyFastISel::materializeLoadStoreOperands(Address &Addr) {
  if (Addr.isRegBase()) {
    unsigned Reg = Addr.getReg();
    if (Reg == 0) {
      Reg = createResultReg(Subtarget->hasAddr64() ?
                            &WebAssembly::I64RegClass :
                            &WebAssembly::I32RegClass);
      unsigned Opc = Subtarget->hasAddr64() ?
                     WebAssembly::CONST_I64 :
                     WebAssembly::CONST_I32;
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc), Reg)
          .addImm(0);
      Addr.setReg(Reg);
    }
  }
}

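// Append the operands common to loads and stores: the offset (or global
// address), the base register or frame index, the alignment placeholder, and
// the memory operand.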
void WebAssemblyFastISel::addLoadStoreOperands(const Address &Addr,
                                               const MachineInstrBuilder &MIB,
                                               MachineMemOperand *MMO) {
  if (const GlobalValue *GV = Addr.getGlobalValue())
    MIB.addGlobalAddress(GV, Addr.getOffset());
  else
    MIB.addImm(Addr.getOffset());

  if (Addr.isRegBase())
    MIB.addReg(Addr.getReg());
  else
    MIB.addFrameIndex(Addr.getFI());

  // Set the alignment operand (this is rewritten in SetP2AlignOperands).
  // TODO: Disable SetP2AlignOperands for FastISel and just do it here.
  MIB.addImm(0);

  MIB.addMemOperand(MMO);
}

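// Mask a value known to be i1 down to its low bit.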
unsigned WebAssemblyFastISel::maskI1Value(unsigned Reg, const Value *V) {
  return zeroExtendToI32(Reg, V, MVT::i1);
}

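// Get a register for V and mask it down to a single i1 bit.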
unsigned WebAssemblyFastISel::getRegForI1Value(const Value *V) {
  return maskI1Value(getRegForValue(V), V);
}

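// Return a register holding Reg zero-extended to i32. An i1 that is already
// zero-extended (a compare result) is returned as-is; other i1 values are
// masked with a constant. Unsupported source types yield 0.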
unsigned WebAssemblyFastISel::zeroExtendToI32(unsigned Reg, const Value *V,
                                              MVT::SimpleValueType From) {
  switch (From) {
  case MVT::i1:
    // If the value is naturally an i1, we don't need to mask it.
    // TODO: Recursively examine selects, phis, and, or, xor, constants.
    if (From == MVT::i1 && V != nullptr && isa<CmpInst>(V))
      return Reg;
    break;
  case MVT::i32:
    return Reg;
  default:
    return 0;
  }

  unsigned Imm = createResultReg(&WebAssembly::I32RegClass);
  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
          TII.get(WebAssembly::CONST_I32), Imm)
      .addImm(~(~uint64_t(0) << MVT(From).getSizeInBits()));

  unsigned Result = createResultReg(&WebAssembly::I32RegClass);
  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
          TII.get(WebAssembly::AND_I32), Result)
      .addReg(Reg)
      .addReg(Imm);

  return Result;
}

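// Sign-extend the narrow value in Reg to i32 with a shift left followed by an
// arithmetic shift right.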
unsigned WebAssemblyFastISel::signExtendToI32(unsigned Reg, const Value *V,
                                              MVT::SimpleValueType From) {
  switch (From) {
  case MVT::i1:
  case MVT::i8:
  case MVT::i16:
    break;
  case MVT::i32:
    return Reg;
  default:
    return 0;
  }

  unsigned Imm = createResultReg(&WebAssembly::I32RegClass);
  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
          TII.get(WebAssembly::CONST_I32), Imm)
      .addImm(32 - MVT(From).getSizeInBits());

  unsigned Left = createResultReg(&WebAssembly::I32RegClass);
  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
          TII.get(WebAssembly::SHL_I32), Left)
      .addReg(Reg)
      .addReg(Imm);

  unsigned Right = createResultReg(&WebAssembly::I32RegClass);
  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
          TII.get(WebAssembly::SHR_S_I32), Right)
      .addReg(Left)
      .addReg(Imm);

  return Right;
}

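// Zero-extend Reg from type From to type To, using zeroExtendToI32 and, for
// i64 destinations, I64_EXTEND_U_I32.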
unsigned WebAssemblyFastISel::zeroExtend(unsigned Reg, const Value *V,
                                         MVT::SimpleValueType From,
                                         MVT::SimpleValueType To) {
  if (To == MVT::i64) {
    if (From == MVT::i64)
      return Reg;

    Reg = zeroExtendToI32(Reg, V, From);

    unsigned Result = createResultReg(&WebAssembly::I64RegClass);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
            TII.get(WebAssembly::I64_EXTEND_U_I32), Result)
        .addReg(Reg);
    return Result;
  }

  switch (From) {
  case MVT::i1:
    // If the value is naturally an i1, we don't need to mask it.
    // TODO: Recursively examine selects, phis, and, or, xor, constants.
    if (From == MVT::i1 && V != nullptr && isa<CmpInst>(V))
      return Reg;
    // Otherwise fall through and mask it like the other narrow types.
  case MVT::i8:
  case MVT::i16:
    break;
  case MVT::i32:
    return Reg;
  default:
    return 0;
  }

  return zeroExtendToI32(Reg, V, From);
}

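// Sign-extend Reg from type From to type To, using signExtendToI32 and, for
// i64 destinations, I64_EXTEND_S_I32.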
unsigned WebAssemblyFastISel::signExtend(unsigned Reg, const Value *V,
                                         MVT::SimpleValueType From,
                                         MVT::SimpleValueType To) {
  if (To == MVT::i64) {
    if (From == MVT::i64)
      return Reg;

    Reg = signExtendToI32(Reg, V, From);

    unsigned Result = createResultReg(&WebAssembly::I64RegClass);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
            TII.get(WebAssembly::I64_EXTEND_S_I32), Result)
        .addReg(Reg);
    return Result;
  }

  switch (From) {
  case MVT::i1:
  case MVT::i8:
  case MVT::i16:
    break;
  case MVT::i32:
    return Reg;
  default:
    return 0;
  }

  return signExtendToI32(Reg, V, From);
}

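// Get a register holding V zero-extended to its legal type.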
unsigned WebAssemblyFastISel::getRegForUnsignedValue(const Value *V) {
  MVT::SimpleValueType From = getSimpleType(V->getType());
  MVT::SimpleValueType To = getLegalType(From);
  return zeroExtend(getRegForValue(V), V, From, To);
}

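// Get a register holding V sign-extended to its legal type.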
unsigned WebAssemblyFastISel::getRegForSignedValue(const Value *V) {
  MVT::SimpleValueType From = getSimpleType(V->getType());
  MVT::SimpleValueType To = getLegalType(From);
  return signExtend(getRegForValue(V), V, From, To);
}

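// Get a register holding V promoted to its legal type, extending with the
// requested signedness.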
unsigned WebAssemblyFastISel::getRegForPromotedValue(const Value *V,
                                                     bool IsSigned) {
  return IsSigned ? getRegForSignedValue(V) :
                    getRegForUnsignedValue(V);
}

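// Logically negate an i1 value by emitting EQZ_I32.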
unsigned WebAssemblyFastISel::notValue(unsigned Reg) {
  unsigned NotReg = createResultReg(&WebAssembly::I32RegClass);
  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
          TII.get(WebAssembly::EQZ_I32), NotReg)
      .addReg(Reg);
  return NotReg;
}

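// Materialize the address of a static alloca by copying its frame index into
// a register; returns 0 for allocas not in the static alloca map.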
unsigned WebAssemblyFastISel::fastMaterializeAlloca(const AllocaInst *AI) {
  DenseMap<const AllocaInst *, int>::iterator SI =
      FuncInfo.StaticAllocaMap.find(AI);

  if (SI != FuncInfo.StaticAllocaMap.end()) {
    unsigned ResultReg = createResultReg(Subtarget->hasAddr64() ?
                                         &WebAssembly::I64RegClass :
                                         &WebAssembly::I32RegClass);
    unsigned Opc = Subtarget->hasAddr64() ?
                   WebAssembly::COPY_LOCAL_I64 :
                   WebAssembly::COPY_LOCAL_I32;
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc), ResultReg)
        .addFrameIndex(SI->second);
    return ResultReg;
  }

  return 0;
}

unsigned WebAssemblyFastISel::fastMaterializeConstant(const Constant *C) {
  if (const GlobalValue *GV = dyn_cast<GlobalValue>(C)) {
    unsigned Reg = createResultReg(Subtarget->hasAddr64() ?
                                   &WebAssembly::I64RegClass :
                                   &WebAssembly::I32RegClass);
    unsigned Opc = Subtarget->hasAddr64() ?
                   WebAssembly::CONST_I64 :
                   WebAssembly::CONST_I32;
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc), Reg)
        .addGlobalAddress(GV);
    return Reg;
  }

  // Let target-independent code handle it.
  return 0;
}

bool WebAssemblyFastISel::selectZExt(const Instruction *I) {
  const ZExtInst *ZExt = cast<ZExtInst>(I);

  unsigned Reg = getRegForUnsignedValue(ZExt->getOperand(0));
  if (Reg == 0)
    return false;

  updateValueMap(ZExt, Reg);
  return true;
}

bool WebAssemblyFastISel::selectSExt(const Instruction *I) {
  const SExtInst *SExt = cast<SExtInst>(I);

  unsigned Reg = getRegForSignedValue(SExt->getOperand(0));
  if (Reg == 0)
    return false;

  updateValueMap(SExt, Reg);
  return true;
}

bool WebAssemblyFastISel::selectICmp(const Instruction *I) {
  const ICmpInst *ICmp = cast<ICmpInst>(I);

  bool I32 = getSimpleType(ICmp->getOperand(0)->getType()) != MVT::i64;
  unsigned Opc;
  bool isSigned = false;
  switch (ICmp->getPredicate()) {
  case ICmpInst::ICMP_EQ:
    Opc = I32 ? WebAssembly::EQ_I32 : WebAssembly::EQ_I64;
    break;
  case ICmpInst::ICMP_NE:
    Opc = I32 ? WebAssembly::NE_I32 : WebAssembly::NE_I64;
    break;
  case ICmpInst::ICMP_UGT:
    Opc = I32 ? WebAssembly::GT_U_I32 : WebAssembly::GT_U_I64;
    break;
  case ICmpInst::ICMP_UGE:
    Opc = I32 ? WebAssembly::GE_U_I32 : WebAssembly::GE_U_I64;
    break;
  case ICmpInst::ICMP_ULT:
    Opc = I32 ? WebAssembly::LT_U_I32 : WebAssembly::LT_U_I64;
    break;
  case ICmpInst::ICMP_ULE:
    Opc = I32 ? WebAssembly::LE_U_I32 : WebAssembly::LE_U_I64;
    break;
  case ICmpInst::ICMP_SGT:
    Opc = I32 ? WebAssembly::GT_S_I32 : WebAssembly::GT_S_I64;
    isSigned = true;
    break;
  case ICmpInst::ICMP_SGE:
    Opc = I32 ? WebAssembly::GE_S_I32 : WebAssembly::GE_S_I64;
    isSigned = true;
    break;
  case ICmpInst::ICMP_SLT:
    Opc = I32 ? WebAssembly::LT_S_I32 : WebAssembly::LT_S_I64;
    isSigned = true;
    break;
  case ICmpInst::ICMP_SLE:
    Opc = I32 ? WebAssembly::LE_S_I32 : WebAssembly::LE_S_I64;
    isSigned = true;
    break;
  default: return false;
  }

  unsigned LHS = getRegForPromotedValue(ICmp->getOperand(0), isSigned);
  if (LHS == 0)
    return false;

  unsigned RHS = getRegForPromotedValue(ICmp->getOperand(1), isSigned);
  if (RHS == 0)
    return false;

  unsigned ResultReg = createResultReg(&WebAssembly::I32RegClass);
  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc), ResultReg)
      .addReg(LHS)
      .addReg(RHS);
  updateValueMap(ICmp, ResultReg);
  return true;
}

bool WebAssemblyFastISel::selectFCmp(const Instruction *I) {
  const FCmpInst *FCmp = cast<FCmpInst>(I);

  unsigned LHS = getRegForValue(FCmp->getOperand(0));
  if (LHS == 0)
    return false;

  unsigned RHS = getRegForValue(FCmp->getOperand(1));
  if (RHS == 0)
    return false;

  bool F32 = getSimpleType(FCmp->getOperand(0)->getType()) != MVT::f64;
  unsigned Opc;
  bool Not = false;
  switch (FCmp->getPredicate()) {
  case FCmpInst::FCMP_OEQ:
    Opc = F32 ? WebAssembly::EQ_F32 : WebAssembly::EQ_F64;
    break;
  case FCmpInst::FCMP_UNE:
    Opc = F32 ? WebAssembly::NE_F32 : WebAssembly::NE_F64;
    break;
  case FCmpInst::FCMP_OGT:
    Opc = F32 ? WebAssembly::GT_F32 : WebAssembly::GT_F64;
    break;
  case FCmpInst::FCMP_OGE:
    Opc = F32 ? WebAssembly::GE_F32 : WebAssembly::GE_F64;
    break;
  case FCmpInst::FCMP_OLT:
    Opc = F32 ? WebAssembly::LT_F32 : WebAssembly::LT_F64;
    break;
  case FCmpInst::FCMP_OLE:
    Opc = F32 ? WebAssembly::LE_F32 : WebAssembly::LE_F64;
    break;
  case FCmpInst::FCMP_UGT:
    Opc = F32 ? WebAssembly::LE_F32 : WebAssembly::LE_F64;
    Not = true;
    break;
  case FCmpInst::FCMP_UGE:
    Opc = F32 ? WebAssembly::LT_F32 : WebAssembly::LT_F64;
    Not = true;
    break;
  case FCmpInst::FCMP_ULT:
    Opc = F32 ? WebAssembly::GE_F32 : WebAssembly::GE_F64;
    Not = true;
    break;
  case FCmpInst::FCMP_ULE:
    Opc = F32 ? WebAssembly::GT_F32 : WebAssembly::GT_F64;
    Not = true;
    break;
  default:
    return false;
  }

  unsigned ResultReg = createResultReg(&WebAssembly::I32RegClass);
  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc), ResultReg)
      .addReg(LHS)
      .addReg(RHS);

  if (Not)
    ResultReg = notValue(ResultReg);

  updateValueMap(FCmp, ResultReg);
  return true;
}

bool WebAssemblyFastISel::selectBitCast(const Instruction *I) {
  // Target-independent code can handle this, except it doesn't set the dead
  // flag on the ARGUMENTS clobber, so we have to do that manually in order
  // to satisfy code that expects this of isBitcast() instructions.
  EVT VT = TLI.getValueType(DL, I->getOperand(0)->getType());
  EVT RetVT = TLI.getValueType(DL, I->getType());
  if (!VT.isSimple() || !RetVT.isSimple())
    return false;
  unsigned Reg = fastEmit_ISD_BITCAST_r(VT.getSimpleVT(), RetVT.getSimpleVT(),
                                        getRegForValue(I->getOperand(0)),
                                        I->getOperand(0)->hasOneUse());
  if (!Reg)
    return false;
  MachineBasicBlock::iterator Iter = FuncInfo.InsertPt;
  --Iter;
  assert(Iter->isBitcast());
  Iter->setPhysRegsDeadExcept(ArrayRef<unsigned>(), TRI);
  updateValueMap(I, Reg);
  return true;
}

bool WebAssemblyFastISel::selectLoad(const Instruction *I) {
  const LoadInst *Load = cast<LoadInst>(I);
  if (Load->isAtomic())
    return false;

  Address Addr;
  if (!computeAddress(Load->getPointerOperand(), Addr))
    return false;

  // TODO: Fold a following sign-/zero-extend into the load instruction.

  unsigned Opc;
  const TargetRegisterClass *RC;
  switch (getSimpleType(Load->getType())) {
  case MVT::i1:
  case MVT::i8:
    Opc = WebAssembly::LOAD8_U_I32;
    RC = &WebAssembly::I32RegClass;
    break;
  case MVT::i16:
    Opc = WebAssembly::LOAD16_U_I32;
    RC = &WebAssembly::I32RegClass;
    break;
  case MVT::i32:
    Opc = WebAssembly::LOAD_I32;
    RC = &WebAssembly::I32RegClass;
    break;
  case MVT::i64:
    Opc = WebAssembly::LOAD_I64;
    RC = &WebAssembly::I64RegClass;
    break;
  case MVT::f32:
    Opc = WebAssembly::LOAD_F32;
    RC = &WebAssembly::F32RegClass;
    break;
  case MVT::f64:
    Opc = WebAssembly::LOAD_F64;
    RC = &WebAssembly::F64RegClass;
    break;
  default:
    return false;
  }

  materializeLoadStoreOperands(Addr);

  unsigned ResultReg = createResultReg(RC);
  auto MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc),
                     ResultReg);

  addLoadStoreOperands(Addr, MIB, createMachineMemOperandFor(Load));

  updateValueMap(Load, ResultReg);
  return true;
}

bool WebAssemblyFastISel::selectStore(const Instruction *I) {
  const StoreInst *Store = cast<StoreInst>(I);
  if (Store->isAtomic())
    return false;

  Address Addr;
  if (!computeAddress(Store->getPointerOperand(), Addr))
    return false;

  unsigned Opc;
  const TargetRegisterClass *RC;
  bool VTIsi1 = false;
  switch (getSimpleType(Store->getValueOperand()->getType())) {
  case MVT::i1:
    VTIsi1 = true;
    // Fall through to the i8 handling; the value is masked below.
  case MVT::i8:
    Opc = WebAssembly::STORE8_I32;
    RC = &WebAssembly::I32RegClass;
    break;
  case MVT::i16:
    Opc = WebAssembly::STORE16_I32;
    RC = &WebAssembly::I32RegClass;
    break;
  case MVT::i32:
    Opc = WebAssembly::STORE_I32;
    RC = &WebAssembly::I32RegClass;
    break;
  case MVT::i64:
    Opc = WebAssembly::STORE_I64;
    RC = &WebAssembly::I64RegClass;
    break;
  case MVT::f32:
    Opc = WebAssembly::STORE_F32;
    RC = &WebAssembly::F32RegClass;
    break;
  case MVT::f64:
    Opc = WebAssembly::STORE_F64;
    RC = &WebAssembly::F64RegClass;
    break;
  default: return false;
  }

  materializeLoadStoreOperands(Addr);

  unsigned ValueReg = getRegForValue(Store->getValueOperand());
  if (VTIsi1)
    ValueReg = maskI1Value(ValueReg, Store->getValueOperand());

  unsigned ResultReg = createResultReg(RC);
  auto MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc),
                     ResultReg);

  addLoadStoreOperands(Addr, MIB, createMachineMemOperandFor(Store));

  MIB.addReg(ValueReg);
  return true;
}

bool WebAssemblyFastISel::selectBr(const Instruction *I) {
  const BranchInst *Br = cast<BranchInst>(I);
  if (Br->isUnconditional()) {
    MachineBasicBlock *MSucc = FuncInfo.MBBMap[Br->getSuccessor(0)];
    fastEmitBranch(MSucc, Br->getDebugLoc());
    return true;
  }

  MachineBasicBlock *TBB = FuncInfo.MBBMap[Br->getSuccessor(0)];
  MachineBasicBlock *FBB = FuncInfo.MBBMap[Br->getSuccessor(1)];

  Value *Cond = Br->getCondition();
  unsigned Opc = WebAssembly::BR_IF;
  if (BinaryOperator::isNot(Cond)) {
    Cond = BinaryOperator::getNotArgument(Cond);
    Opc = WebAssembly::BR_UNLESS;
  }

  unsigned CondReg = getRegForI1Value(Cond);
  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc))
      .addMBB(TBB)
      .addReg(CondReg);

  finishCondBranch(Br->getParent(), TBB, FBB);
  return true;
}

bool WebAssemblyFastISel::selectRet(const Instruction *I) {
  if (!FuncInfo.CanLowerReturn)
    return false;

  const ReturnInst *Ret = cast<ReturnInst>(I);

  if (Ret->getNumOperands() == 0) {
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
            TII.get(WebAssembly::RETURN_VOID));
    return true;
  }

  Value *RV = Ret->getOperand(0);
  unsigned Opc;
  switch (getSimpleType(RV->getType())) {
  case MVT::i1: case MVT::i8:
  case MVT::i16: case MVT::i32:
    Opc = WebAssembly::RETURN_I32;
    break;
  case MVT::i64:
    Opc = WebAssembly::RETURN_I64;
    break;
  case MVT::f32: Opc = WebAssembly::RETURN_F32; break;
  case MVT::f64: Opc = WebAssembly::RETURN_F64; break;
  default: return false;
  }

  unsigned Reg = getRegForValue(RV);
  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc)).addReg(Reg);
  return true;
}

bool WebAssemblyFastISel::selectUnreachable(const Instruction *I) {
  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
          TII.get(WebAssembly::UNREACHABLE));
  return true;
}

bool WebAssemblyFastISel::fastSelectInstruction(const Instruction *I) {
  switch (I->getOpcode()) {
  case Instruction::ZExt: return selectZExt(I);
  case Instruction::SExt: return selectSExt(I);
  case Instruction::ICmp: return selectICmp(I);
  case Instruction::FCmp: return selectFCmp(I);
  case Instruction::BitCast: return selectBitCast(I);
  case Instruction::Load: return selectLoad(I);
  case Instruction::Store: return selectStore(I);
  case Instruction::Br: return selectBr(I);
  case Instruction::Ret: return selectRet(I);
  case Instruction::Unreachable: return selectUnreachable(I);
  default: break;
  }

  // Fall back to target-independent instruction selection.
  return selectOperator(I, I->getOpcode());
}

FastISel *WebAssembly::createFastISel(FunctionLoweringInfo &FuncInfo,
                                      const TargetLibraryInfo *LibInfo) {
  return new WebAssemblyFastISel(FuncInfo, LibInfo);
}