Mirror of https://github.com/RPCSX/llvm.git (synced 2024-12-02 16:56:50 +00:00)
Fix line endings.
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@293554 91177308-0d34-0410-b5e6-96231b3b80d8
Parent: 01e3167e09
Commit: b643b21e99
X86CallingConv.cpp
@@ -1,208 +1,208 @@

//=== X86CallingConv.cpp - X86 Custom Calling Convention Impl -*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the implementation of custom routines for the X86
// Calling Convention that aren't done by tablegen.
//
//===----------------------------------------------------------------------===//

#include "MCTargetDesc/X86MCTargetDesc.h"
#include "X86Subtarget.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/IR/CallingConv.h"

namespace llvm {

bool CC_X86_32_RegCall_Assign2Regs(unsigned &ValNo, MVT &ValVT, MVT &LocVT,
                                   CCValAssign::LocInfo &LocInfo,
                                   ISD::ArgFlagsTy &ArgFlags, CCState &State) {
  // List of GPR registers that are available to store values in the regcall
  // calling convention.
  static const MCPhysReg RegList[] = {X86::EAX, X86::ECX, X86::EDX, X86::EDI,
                                      X86::ESI};

  // The vector will hold all the registers still available for allocation.
  SmallVector<unsigned, 5> AvailableRegs;

  // Search for the available registers.
  for (auto Reg : RegList) {
    if (!State.isAllocated(Reg))
      AvailableRegs.push_back(Reg);
  }

  const size_t RequiredGprsUponSplit = 2;
  if (AvailableRegs.size() < RequiredGprsUponSplit)
    return false; // Not enough free registers - continue the search.

  // Allocate the available registers.
  for (unsigned I = 0; I < RequiredGprsUponSplit; I++) {

    // Mark the register as allocated.
    unsigned Reg = State.AllocateReg(AvailableRegs[I]);

    // Since we previously made sure that 2 registers are available
    // we expect that a real register number will be returned.
    assert(Reg && "Expecting a register will be available");

    // Assign the value to the allocated register.
    State.addLoc(CCValAssign::getCustomReg(ValNo, ValVT, Reg, LocVT, LocInfo));
  }

  // Successful in allocating registers - stop scanning the next rules.
  return true;
}

static ArrayRef<MCPhysReg> CC_X86_VectorCallGetSSEs(const MVT &ValVT) {
  if (ValVT.is512BitVector()) {
    static const MCPhysReg RegListZMM[] = {X86::ZMM0, X86::ZMM1, X86::ZMM2,
                                           X86::ZMM3, X86::ZMM4, X86::ZMM5};
    return makeArrayRef(std::begin(RegListZMM), std::end(RegListZMM));
  }

  if (ValVT.is256BitVector()) {
    static const MCPhysReg RegListYMM[] = {X86::YMM0, X86::YMM1, X86::YMM2,
                                           X86::YMM3, X86::YMM4, X86::YMM5};
    return makeArrayRef(std::begin(RegListYMM), std::end(RegListYMM));
  }

  static const MCPhysReg RegListXMM[] = {X86::XMM0, X86::XMM1, X86::XMM2,
                                         X86::XMM3, X86::XMM4, X86::XMM5};
  return makeArrayRef(std::begin(RegListXMM), std::end(RegListXMM));
}

static ArrayRef<MCPhysReg> CC_X86_64_VectorCallGetGPRs() {
  static const MCPhysReg RegListGPR[] = {X86::RCX, X86::RDX, X86::R8, X86::R9};
  return makeArrayRef(std::begin(RegListGPR), std::end(RegListGPR));
}

static bool CC_X86_VectorCallAssignRegister(unsigned &ValNo, MVT &ValVT,
                                            MVT &LocVT,
                                            CCValAssign::LocInfo &LocInfo,
                                            ISD::ArgFlagsTy &ArgFlags,
                                            CCState &State) {

  ArrayRef<MCPhysReg> RegList = CC_X86_VectorCallGetSSEs(ValVT);
  bool Is64bit = static_cast<const X86Subtarget &>(
                     State.getMachineFunction().getSubtarget())
                     .is64Bit();

  for (auto Reg : RegList) {
    // If the register is not marked as allocated - assign to it.
    if (!State.isAllocated(Reg)) {
      unsigned AssignedReg = State.AllocateReg(Reg);
      assert(AssignedReg == Reg && "Expecting a valid register allocation");
      State.addLoc(
          CCValAssign::getReg(ValNo, ValVT, AssignedReg, LocVT, LocInfo));
      return true;
    }
    // If the register is marked as shadow allocated - assign to it.
    if (Is64bit && State.IsShadowAllocatedReg(Reg)) {
      State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
      return true;
    }
  }

  llvm_unreachable("Clang should ensure that hva marked vectors will have "
                   "an available register.");
  return false;
}

bool CC_X86_64_VectorCall(unsigned &ValNo, MVT &ValVT, MVT &LocVT,
                          CCValAssign::LocInfo &LocInfo,
                          ISD::ArgFlagsTy &ArgFlags, CCState &State) {
  // On the second pass, go through the HVAs only.
  if (ArgFlags.isSecArgPass()) {
    if (ArgFlags.isHva())
      return CC_X86_VectorCallAssignRegister(ValNo, ValVT, LocVT, LocInfo,
                                             ArgFlags, State);
    return true;
  }

  // Process only vector types as defined by the vectorcall spec:
  // "A vector type is either a floating-point type, for example,
  // a float or double, or an SIMD vector type, for example, __m128 or __m256".
  if (!(ValVT.isFloatingPoint() ||
        (ValVT.isVector() && ValVT.getSizeInBits() >= 128))) {
    // If R9 was already assigned it means that we are after the fourth element
    // and because this is not an HVA / vector type, we need to allocate
    // a shadow XMM register.
    if (State.isAllocated(X86::R9)) {
      // Assign a shadow XMM register.
      (void)State.AllocateReg(CC_X86_VectorCallGetSSEs(ValVT));
    }

    return false;
  }

  if (!ArgFlags.isHva() || ArgFlags.isHvaStart()) {
    // Assign a shadow GPR register.
    (void)State.AllocateReg(CC_X86_64_VectorCallGetGPRs());

    // Assign an XMM register (shadow for HVA and non-shadow for non-HVA).
    if (unsigned Reg = State.AllocateReg(CC_X86_VectorCallGetSSEs(ValVT))) {
      // In the vectorcall calling convention, additional shadow stack can be
      // created on top of the basic 32 bytes of win64.
      // It can happen if the fifth or sixth argument is a vector type or HVA.
      // In that case, a shadow stack of 8 bytes is allocated for each argument.
      if (Reg == X86::XMM4 || Reg == X86::XMM5)
        State.AllocateStack(8, 8);

      if (!ArgFlags.isHva()) {
        State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
        return true; // Allocated a register - stop the search.
      }
    }
  }

  // If this is an HVA - stop the search,
  // otherwise continue the search.
  return ArgFlags.isHva();
}

bool CC_X86_32_VectorCall(unsigned &ValNo, MVT &ValVT, MVT &LocVT,
                          CCValAssign::LocInfo &LocInfo,
                          ISD::ArgFlagsTy &ArgFlags, CCState &State) {
  // On the second pass, go through the HVAs only.
  if (ArgFlags.isSecArgPass()) {
    if (ArgFlags.isHva())
      return CC_X86_VectorCallAssignRegister(ValNo, ValVT, LocVT, LocInfo,
                                             ArgFlags, State);
    return true;
  }

  // Process only vector types as defined by the vectorcall spec:
  // "A vector type is either a floating-point type, for example,
  // a float or double, or an SIMD vector type, for example, __m128 or __m256".
  if (!(ValVT.isFloatingPoint() ||
        (ValVT.isVector() && ValVT.getSizeInBits() >= 128))) {
    return false;
  }

  if (ArgFlags.isHva())
    return true; // If this is an HVA - stop the search.

  // Assign an XMM register.
  if (unsigned Reg = State.AllocateReg(CC_X86_VectorCallGetSSEs(ValVT))) {
    State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
    return true;
  }

  // In case we did not find an available XMM register for a vector -
  // pass it indirectly.
  // It is similar to CCPassIndirect, with the addition of inreg.
  if (!ValVT.isFloatingPoint()) {
    LocVT = MVT::i32;
    LocInfo = CCValAssign::Indirect;
    ArgFlags.setInReg();
  }

  return false; // No register was assigned - continue the search.
}

} // End llvm namespace
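
For context, and not part of this commit: custom routines such as CC_X86_32_RegCall_Assign2Regs are invoked from the TableGen-generated calling-convention code via the CCCustom action in X86CallingConv.td. The snippet below is a minimal, hypothetical sketch of that hookup; the record name and the surrounding rules are assumptions for illustration, not copied from the real table.

// Hypothetical sketch - not the real CC_X86_32_RegCall definition.
def CC_X86_32_RegCall_Sketch : CallingConv<[
  // Halves of a split 64-bit value are handed to the custom C++ routine,
  // which either claims two free GPRs or lets later rules run.
  CCIfSplit<CCCustom<"CC_X86_32_RegCall_Assign2Regs">>,
  // Remaining 32-bit integers draw from the same GPR pool directly.
  CCIfType<[i32], CCAssignToReg<[EAX, ECX, EDX, EDI, ESI]>>,
  // Anything left over goes on the stack.
  CCAssignToStack<4, 4>
]>;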
X86InstrCMovSetCC.td
@@ -1,118 +1,118 @@

//===-- X86InstrCMovSetCC.td - Conditional Move and SetCC --*- tablegen -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file describes the X86 conditional move and set on condition
// instructions.
//
//===----------------------------------------------------------------------===//


// CMOV instructions.
multiclass CMOV<bits<8> opc, string Mnemonic, PatLeaf CondNode> {
  let Uses = [EFLAGS], Predicates = [HasCMov], Constraints = "$src1 = $dst",
      isCommutable = 1, SchedRW = [WriteALU] in {
    def NAME#16rr
      : I<opc, MRMSrcReg, (outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
          !strconcat(Mnemonic, "{w}\t{$src2, $dst|$dst, $src2}"),
          [(set GR16:$dst,
                (X86cmov GR16:$src1, GR16:$src2, CondNode, EFLAGS))],
          IIC_CMOV16_RR>, TB, OpSize16;
    def NAME#32rr
      : I<opc, MRMSrcReg, (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
          !strconcat(Mnemonic, "{l}\t{$src2, $dst|$dst, $src2}"),
          [(set GR32:$dst,
                (X86cmov GR32:$src1, GR32:$src2, CondNode, EFLAGS))],
          IIC_CMOV32_RR>, TB, OpSize32;
    def NAME#64rr
      : RI<opc, MRMSrcReg, (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
           !strconcat(Mnemonic, "{q}\t{$src2, $dst|$dst, $src2}"),
           [(set GR64:$dst,
                 (X86cmov GR64:$src1, GR64:$src2, CondNode, EFLAGS))],
           IIC_CMOV32_RR>, TB;
  }

  let Uses = [EFLAGS], Predicates = [HasCMov], Constraints = "$src1 = $dst",
      SchedRW = [WriteALULd, ReadAfterLd] in {
    def NAME#16rm
      : I<opc, MRMSrcMem, (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2),
          !strconcat(Mnemonic, "{w}\t{$src2, $dst|$dst, $src2}"),
          [(set GR16:$dst, (X86cmov GR16:$src1, (loadi16 addr:$src2),
                                    CondNode, EFLAGS))], IIC_CMOV16_RM>,
        TB, OpSize16;
    def NAME#32rm
      : I<opc, MRMSrcMem, (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2),
          !strconcat(Mnemonic, "{l}\t{$src2, $dst|$dst, $src2}"),
          [(set GR32:$dst, (X86cmov GR32:$src1, (loadi32 addr:$src2),
                                    CondNode, EFLAGS))], IIC_CMOV32_RM>,
        TB, OpSize32;
    def NAME#64rm
      : RI<opc, MRMSrcMem, (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
           !strconcat(Mnemonic, "{q}\t{$src2, $dst|$dst, $src2}"),
           [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
                                     CondNode, EFLAGS))], IIC_CMOV32_RM>, TB;
  } // Uses = [EFLAGS], Predicates = [HasCMov], Constraints = "$src1 = $dst"
} // end multiclass


// Conditional Moves.
defm CMOVO  : CMOV<0x40, "cmovo" , X86_COND_O>;
defm CMOVNO : CMOV<0x41, "cmovno", X86_COND_NO>;
defm CMOVB  : CMOV<0x42, "cmovb" , X86_COND_B>;
defm CMOVAE : CMOV<0x43, "cmovae", X86_COND_AE>;
defm CMOVE  : CMOV<0x44, "cmove" , X86_COND_E>;
defm CMOVNE : CMOV<0x45, "cmovne", X86_COND_NE>;
defm CMOVBE : CMOV<0x46, "cmovbe", X86_COND_BE>;
defm CMOVA  : CMOV<0x47, "cmova" , X86_COND_A>;
defm CMOVS  : CMOV<0x48, "cmovs" , X86_COND_S>;
defm CMOVNS : CMOV<0x49, "cmovns", X86_COND_NS>;
defm CMOVP  : CMOV<0x4A, "cmovp" , X86_COND_P>;
defm CMOVNP : CMOV<0x4B, "cmovnp", X86_COND_NP>;
defm CMOVL  : CMOV<0x4C, "cmovl" , X86_COND_L>;
defm CMOVGE : CMOV<0x4D, "cmovge", X86_COND_GE>;
defm CMOVLE : CMOV<0x4E, "cmovle", X86_COND_LE>;
defm CMOVG  : CMOV<0x4F, "cmovg" , X86_COND_G>;


// SetCC instructions.
multiclass SETCC<bits<8> opc, string Mnemonic, PatLeaf OpNode> {
  let Uses = [EFLAGS] in {
    def r : I<opc, MRMXr, (outs GR8:$dst), (ins),
              !strconcat(Mnemonic, "\t$dst"),
              [(set GR8:$dst, (X86setcc OpNode, EFLAGS))],
              IIC_SET_R>, TB, Sched<[WriteALU]>;
    def m : I<opc, MRMXm, (outs), (ins i8mem:$dst),
              !strconcat(Mnemonic, "\t$dst"),
              [(store (X86setcc OpNode, EFLAGS), addr:$dst)],
              IIC_SET_M>, TB, Sched<[WriteALU, WriteStore]>;
  } // Uses = [EFLAGS]
}

defm SETO  : SETCC<0x90, "seto",  X86_COND_O>;  // is overflow bit set
defm SETNO : SETCC<0x91, "setno", X86_COND_NO>; // is overflow bit not set
defm SETB  : SETCC<0x92, "setb",  X86_COND_B>;  // unsigned less than
defm SETAE : SETCC<0x93, "setae", X86_COND_AE>; // unsigned greater or equal
defm SETE  : SETCC<0x94, "sete",  X86_COND_E>;  // equal to
defm SETNE : SETCC<0x95, "setne", X86_COND_NE>; // not equal to
defm SETBE : SETCC<0x96, "setbe", X86_COND_BE>; // unsigned less than or equal
defm SETA  : SETCC<0x97, "seta",  X86_COND_A>;  // unsigned greater than
defm SETS  : SETCC<0x98, "sets",  X86_COND_S>;  // is signed bit set
defm SETNS : SETCC<0x99, "setns", X86_COND_NS>; // is not signed
defm SETP  : SETCC<0x9A, "setp",  X86_COND_P>;  // is parity bit set
defm SETNP : SETCC<0x9B, "setnp", X86_COND_NP>; // is parity bit not set
defm SETL  : SETCC<0x9C, "setl",  X86_COND_L>;  // signed less than
defm SETGE : SETCC<0x9D, "setge", X86_COND_GE>; // signed greater or equal
defm SETLE : SETCC<0x9E, "setle", X86_COND_LE>; // signed less than or equal
defm SETG  : SETCC<0x9F, "setg",  X86_COND_G>;  // signed greater than

// SALC is an undocumented instruction. Information for this instruction can be
// found here: http://www.rcollins.org/secrets/opcodes/SALC.html
// Set AL if carry.
let Uses = [EFLAGS], Defs = [AL] in {
  def SALC : I<0xD6, RawFrm, (outs), (ins), "salc", []>, Requires<[Not64BitMode]>;
}
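
A side note on how these multiclasses expand (standard TableGen behavior, not something this file spells out): inside a multiclass, def NAME#16rr pastes the enclosing defm name onto the suffix, so defm CMOVE : CMOV<0x44, "cmove", X86_COND_E> produces CMOVE16rr, CMOVE32rr, CMOVE64rr and the matching rm forms, and the SETCC defms likewise produce r and m variants. The standalone sketch below uses hypothetical helper classes to show just that naming mechanism and should run through llvm-tblgen on its own.

// Hypothetical, self-contained sketch of the NAME# expansion used above.
class SketchInst<string asm> {
  string AsmString = asm;
}
multiclass SketchCMOV<string Mnemonic> {
  def NAME#16rr : SketchInst<Mnemonic # "w">;  // 16-bit register form
  def NAME#32rr : SketchInst<Mnemonic # "l">;  // 32-bit register form
  def NAME#64rr : SketchInst<Mnemonic # "q">;  // 64-bit register form
}
// Produces records named CMOVE16rr, CMOVE32rr and CMOVE64rr.
defm CMOVE : SketchCMOV<"cmove">;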
Diffs for two additional files in this commit are suppressed because they are too large.