Revert rL342466: [llvm-exegesis] Improve Register Setup.

rL342465 is breaking the MSVC buildbots, but I need to revert this dependent revision as well.

Summary:
Added a function to set a register to a particular value, plus tests.
Added an EFLAGS test and switched to the new setRegTo instead of setRegToConstant.

Reviewers: courbet, javed.absar

Subscribers: mgorny, tschuett, llvm-commits

Differential Revision: https://reviews.llvm.org/D51856

llvm-svn: 342489
Simon Pilgrim 2018-09-18 15:35:49 +00:00
parent 7cbd1e08a6
commit 5064dc23e2
16 changed files with 139 additions and 189 deletions
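
For context, the hook whose interface changes back in this revert is ExegesisTarget::setRegTo. The change being reverted (D51856) took the register first and implemented the hook per register class; the restored code takes the value first, and the AArch64 and default-target implementations go back to llvm_unreachable stubs. Below is a minimal sketch of the two declarations, reconstructed from the ExegesisTarget declaration hunk further down; the wrapper struct and forward declarations are added here only for illustration and are not part of the real header.

    #include <vector>
    namespace llvm { class MCSubtargetInfo; class APInt; class MCInst; }

    struct ExegesisTargetSketch {
      // Shape introduced by D51856 and removed by this revert:
      // (STI, Reg, Value) -- generate code that materializes Value into Reg.
      virtual std::vector<llvm::MCInst>
      setRegTo(const llvm::MCSubtargetInfo &STI, unsigned Reg,
               const llvm::APInt &Value) const = 0;

      // Shape restored by this revert: (STI, Value, Reg); of the targets in
      // this diff, only the X86 implementation does real work after the revert.
      virtual std::vector<llvm::MCInst> setRegTo(const llvm::MCSubtargetInfo &STI,
                                                 const llvm::APInt &Value,
                                                 unsigned Reg) const = 0;
    };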

@@ -9,7 +9,6 @@
#include "../Target.h"
#include "../Latency.h"
#include "AArch64.h"
#include "AArch64RegisterInfo.h"
namespace exegesis {
@@ -27,51 +26,33 @@ private:
}
};
namespace {
static unsigned getLoadImmediateOpcode(unsigned RegBitWidth) {
switch (RegBitWidth) {
case 32:
return llvm::AArch64::MOVi32imm;
case 64:
return llvm::AArch64::MOVi64imm;
}
llvm_unreachable("Invalid Value Width");
}
// Generates an instruction to load an immediate value into a register.
static llvm::MCInst loadImmediate(unsigned Reg, unsigned RegBitWidth,
const llvm::APInt &Value) {
if (Value.getBitWidth() > RegBitWidth)
llvm_unreachable("Value must fit in the Register");
return llvm::MCInstBuilder(getLoadImmediateOpcode(RegBitWidth))
.addReg(Reg)
.addImm(Value.getZExtValue());
}
} // namespace
class ExegesisAArch64Target : public ExegesisTarget {
std::vector<llvm::MCInst> setRegTo(const llvm::MCSubtargetInfo &STI,
unsigned Reg,
const llvm::APInt &Value) const override {
if (llvm::AArch64::GPR32RegClass.contains(Reg))
return {loadImmediate(Reg, 32, Value)};
if (llvm::AArch64::GPR64RegClass.contains(Reg))
return {loadImmediate(Reg, 64, Value)};
llvm::errs() << "setRegTo is not implemented, results will be unreliable\n";
return {};
const llvm::APInt &Value,
unsigned Reg) const override {
llvm_unreachable("Not yet implemented");
}
unsigned getScratchMemoryRegister(const llvm::Triple &) const override {
llvm_unreachable("Not yet implemented");
}
void fillMemoryOperands(InstructionBuilder &IB, unsigned Reg,
unsigned Offset) const override {
llvm_unreachable("Not yet implemented");
}
unsigned getMaxMemoryAccessSize() const override {
llvm_unreachable("Not yet implemented");
}
bool matchesArch(llvm::Triple::ArchType Arch) const override {
return Arch == llvm::Triple::aarch64 || Arch == llvm::Triple::aarch64_be;
}
void addTargetSpecificPasses(llvm::PassManagerBase &PM) const override {
// Function return is a pseudo-instruction that needs to be expanded
PM.add(llvm::createAArch64ExpandPseudoPass());
}
std::unique_ptr<BenchmarkRunner>
createLatencyBenchmarkRunner(const LLVMState &State) const override {
return llvm::make_unique<AArch64LatencyBenchmarkRunner>(State);

@@ -29,18 +29,18 @@ static constexpr const char ModuleID[] = "ExegesisInfoTest";
static constexpr const char FunctionID[] = "foo";
static std::vector<llvm::MCInst>
generateSnippetSetupCode(const ExegesisTarget &ET,
const llvm::MCSubtargetInfo *const MSI,
llvm::ArrayRef<RegisterValue> RegisterInitialValues,
bool &IsSnippetSetupComplete) {
generateSnippetSetupCode(const llvm::ArrayRef<unsigned> RegsToDef,
const ExegesisTarget &ET,
const llvm::LLVMTargetMachine &TM, bool &IsComplete) {
IsComplete = true;
std::vector<llvm::MCInst> Result;
for (const RegisterValue &RV : RegisterInitialValues) {
// Load a constant in the register.
const auto SetRegisterCode = ET.setRegTo(*MSI, RV.Register, RV.Value);
if (SetRegisterCode.empty())
IsSnippetSetupComplete = false;
Result.insert(Result.end(), SetRegisterCode.begin(), SetRegisterCode.end());
}
// for (const unsigned Reg : RegsToDef) {
// // Load a constant in the register.
// const auto Code = ET.setRegToConstant(*TM.getMCSubtargetInfo(), Reg);
// if (Code.empty())
// IsComplete = false;
// Result.insert(Result.end(), Code.begin(), Code.end());
// }
return Result;
}
@@ -149,7 +149,7 @@ llvm::BitVector getFunctionReservedRegs(const llvm::TargetMachine &TM) {
void assembleToStream(const ExegesisTarget &ET,
std::unique_ptr<llvm::LLVMTargetMachine> TM,
llvm::ArrayRef<unsigned> LiveIns,
llvm::ArrayRef<RegisterValue> RegisterInitialValues,
llvm::ArrayRef<unsigned> RegsToDef,
llvm::ArrayRef<llvm::MCInst> Instructions,
llvm::raw_pwrite_stream &AsmStream) {
std::unique_ptr<llvm::LLVMContext> Context =
@@ -171,12 +171,13 @@ void assembleToStream(const ExegesisTarget &ET,
MF.getRegInfo().addLiveIn(Reg);
bool IsSnippetSetupComplete = false;
std::vector<llvm::MCInst> Code =
generateSnippetSetupCode(ET, TM->getMCSubtargetInfo(),
RegisterInitialValues, IsSnippetSetupComplete);
Code.insert(Code.end(), Instructions.begin(), Instructions.end());
std::vector<llvm::MCInst> SnippetWithSetup =
generateSnippetSetupCode(RegsToDef, ET, *TM, IsSnippetSetupComplete);
if (!SnippetWithSetup.empty()) {
SnippetWithSetup.insert(SnippetWithSetup.end(), Instructions.begin(),
Instructions.end());
Instructions = SnippetWithSetup;
}
// If the snippet setup is not complete, we disable liveness tracking. This
// means that we won't know what values are in the registers.
if (!IsSnippetSetupComplete)
@@ -187,7 +188,7 @@ void assembleToStream(const ExegesisTarget &ET,
MF.getRegInfo().freezeReservedRegs(MF);
// Fill the MachineFunction from the instructions.
fillMachineFunction(MF, LiveIns, Code);
fillMachineFunction(MF, LiveIns, Instructions);
// We create the pass manager, run the passes to populate AsmBuffer.
llvm::MCContext &MCContext = MMI->getContext();

@@ -39,12 +39,6 @@ class ExegesisTarget;
// convention and target machine).
llvm::BitVector getFunctionReservedRegs(const llvm::TargetMachine &TM);
// A simple object storing the value for a particular register.
struct RegisterValue {
unsigned Register;
llvm::APInt Value;
};
// Creates a temporary `void foo(char*)` function containing the provided
// Instructions. Runs a set of llvm Passes to provide correct prologue and
// epilogue. Once the MachineFunction is ready, it is assembled for TM to
@@ -52,7 +46,7 @@ struct RegisterValue {
void assembleToStream(const ExegesisTarget &ET,
std::unique_ptr<llvm::LLVMTargetMachine> TM,
llvm::ArrayRef<unsigned> LiveIns,
llvm::ArrayRef<RegisterValue> RegisterInitialValues,
llvm::ArrayRef<unsigned> RegsToDef,
llvm::ArrayRef<llvm::MCInst> Instructions,
llvm::raw_pwrite_stream &AsmStream);

@@ -23,7 +23,7 @@ struct BenchmarkCode {
// Before the code is executed, some instructions are added to set up the
// registers' initial values.
std::vector<RegisterValue> RegisterInitialValues;
std::vector<unsigned> RegsToDef;
// We also need to provide the registers that are live on entry for the
// assembler to generate proper prologue/epilogue.

@@ -104,7 +104,7 @@ BenchmarkRunner::writeObjectFile(const BenchmarkCode &BC,
return std::move(E);
llvm::raw_fd_ostream OFS(ResultFD, true /*ShouldClose*/);
assembleToStream(State.getExegesisTarget(), State.createTargetMachine(),
BC.LiveIns, BC.RegisterInitialValues, Code, OFS);
BC.LiveIns, BC.RegsToDef, Code, OFS);
return ResultPath.str();
}

@@ -49,7 +49,7 @@ SnippetGenerator::generateConfigurations(unsigned Opcode) const {
}
if (CT.ScratchSpacePointerInReg)
BC.LiveIns.push_back(CT.ScratchSpacePointerInReg);
BC.RegisterInitialValues = computeRegisterInitialValues(CT.Instructions);
BC.RegsToDef = computeRegsToDef(CT.Instructions);
Output.push_back(std::move(BC));
}
return Output;
@@ -57,14 +57,14 @@ SnippetGenerator::generateConfigurations(unsigned Opcode) const {
return E.takeError();
}
std::vector<RegisterValue> SnippetGenerator::computeRegisterInitialValues(
std::vector<unsigned> SnippetGenerator::computeRegsToDef(
const std::vector<InstructionBuilder> &Instructions) const {
// Collect all register uses and create an assignment for each of them.
// Ignore memory operands which are handled separately.
// Loop invariant: DefinedRegs[i] is true iff it has been set at least once
// before the current instruction.
llvm::BitVector DefinedRegs = RATC.emptyRegisters();
std::vector<RegisterValue> RIV;
std::vector<unsigned> RegsToDef;
for (const InstructionBuilder &IB : Instructions) {
// Returns the register that this Operand sets or uses, or 0 if this is not
// a register.
@@ -82,7 +82,7 @@ std::vector<RegisterValue> SnippetGenerator::computeRegisterInitialValues(
if (!Op.IsDef) {
const unsigned Reg = GetOpReg(Op);
if (Reg > 0 && !DefinedRegs.test(Reg)) {
RIV.push_back(RegisterValue{Reg, llvm::APInt()});
RegsToDef.push_back(Reg);
DefinedRegs.set(Reg);
}
}
@@ -96,7 +96,7 @@ std::vector<RegisterValue> SnippetGenerator::computeRegisterInitialValues(
}
}
}
return RIV;
return RegsToDef;
}
llvm::Expected<CodeTemplate> SnippetGenerator::generateSelfAliasingCodeTemplate(

@@ -48,8 +48,8 @@ public:
generateConfigurations(unsigned Opcode) const;
// Given a snippet, computes which registers the setup code needs to define.
std::vector<RegisterValue> computeRegisterInitialValues(
const std::vector<InstructionBuilder> &Snippet) const;
std::vector<unsigned>
computeRegsToDef(const std::vector<InstructionBuilder> &Snippet) const;
protected:
const LLVMState &State;

@@ -90,8 +90,21 @@ namespace {
class ExegesisDefaultTarget : public ExegesisTarget {
private:
std::vector<llvm::MCInst> setRegTo(const llvm::MCSubtargetInfo &STI,
unsigned Reg,
const llvm::APInt &Value) const override {
const llvm::APInt &Value,
unsigned Reg) const override {
llvm_unreachable("Not yet implemented");
}
unsigned getScratchMemoryRegister(const llvm::Triple &) const override {
llvm_unreachable("Not yet implemented");
}
void fillMemoryOperands(InstructionBuilder &IB, unsigned Reg,
unsigned Offset) const override {
llvm_unreachable("Not yet implemented");
}
unsigned getMaxMemoryAccessSize() const override {
llvm_unreachable("Not yet implemented");
}

@@ -36,31 +36,25 @@ public:
virtual void addTargetSpecificPasses(llvm::PassManagerBase &PM) const {}
// Generates code to move a constant into the given register.
// Precondition: Value must fit into Reg.
virtual std::vector<llvm::MCInst>
setRegTo(const llvm::MCSubtargetInfo &STI, unsigned Reg,
const llvm::APInt &Value) const = 0;
virtual std::vector<llvm::MCInst> setRegTo(const llvm::MCSubtargetInfo &STI,
const llvm::APInt &Value,
unsigned Reg) const = 0;
// Returns the register pointing to scratch memory, or 0 if this target
// does not support memory operands. The benchmark function uses the
// default calling convention.
virtual unsigned getScratchMemoryRegister(const llvm::Triple &) const {
return 0;
}
virtual unsigned getScratchMemoryRegister(const llvm::Triple &) const = 0;
// Fills memory operands with references to the address at [Reg] + Offset.
virtual void fillMemoryOperands(InstructionBuilder &IB, unsigned Reg,
unsigned Offset) const {
llvm_unreachable(
"fillMemoryOperands() requires getScratchMemoryRegister() > 0");
}
unsigned Offset) const = 0;
// Returns the maximum number of bytes a load/store instruction can access at
// once. This is typically the size of the largest register available on the
// processor. Note that this is only used as a hint to generate independent
// load/stores to/from memory, so the exact returned value does not really
// matter as long as it's large enough.
virtual unsigned getMaxMemoryAccessSize() const { return 0; }
virtual unsigned getMaxMemoryAccessSize() const = 0;
// Creates a snippet generator for the given mode.
std::unique_ptr<SnippetGenerator>

@@ -101,8 +101,8 @@ protected:
}
};
static unsigned GetLoadImmediateOpcode(unsigned RegBitWidth) {
switch (RegBitWidth) {
static unsigned GetLoadImmediateOpcode(const llvm::APInt &Value) {
switch (Value.getBitWidth()) {
case 8:
return llvm::X86::MOV8ri;
case 16:
@@ -115,12 +115,10 @@ static unsigned GetLoadImmediateOpcode(unsigned RegBitWidth) {
llvm_unreachable("Invalid Value Width");
}
// Generates an instruction to load an immediate value into a register.
static llvm::MCInst loadImmediate(unsigned Reg, unsigned RegBitWidth,
const llvm::APInt &Value) {
if (Value.getBitWidth() > RegBitWidth)
llvm_unreachable("Value must fit in the Register");
return llvm::MCInstBuilder(GetLoadImmediateOpcode(RegBitWidth))
static llvm::MCInst loadImmediate(unsigned Reg, const llvm::APInt &Value,
unsigned MaxBitWidth) {
assert(Value.getBitWidth() <= MaxBitWidth && "Value too big to fit register");
return llvm::MCInstBuilder(GetLoadImmediateOpcode(Value))
.addReg(Reg)
.addImm(Value.getZExtValue());
}
@@ -167,8 +165,6 @@ static llvm::MCInst releaseStackSpace(unsigned Bytes) {
.addImm(Bytes);
}
// Reserves some space on the stack, fills it with the content of the provided
// constant and provides methods to load the stack value into a register.
struct ConstantInliner {
explicit ConstantInliner(const llvm::APInt &Constant)
: StackSize(Constant.getBitWidth() / 8) {
@@ -191,19 +187,17 @@ struct ConstantInliner {
Constant.extractBits(8, ByteOffset * 8).getZExtValue()));
}
std::vector<llvm::MCInst> loadAndFinalize(unsigned Reg, unsigned RegBitWidth,
unsigned Opcode) {
assert(StackSize * 8 == RegBitWidth &&
"Value does not have the correct size");
std::vector<llvm::MCInst> loadAndFinalize(unsigned Reg, unsigned Opcode,
unsigned BitWidth) {
assert(StackSize * 8 == BitWidth && "Value does not have the correct size");
add(loadToReg(Reg, Opcode));
add(releaseStackSpace(StackSize));
return std::move(Instructions);
}
std::vector<llvm::MCInst>
loadX87AndFinalize(unsigned Reg, unsigned RegBitWidth, unsigned Opcode) {
assert(StackSize * 8 == RegBitWidth &&
"Value does not have the correct size");
std::vector<llvm::MCInst> loadX87AndFinalize(unsigned Reg, unsigned Opcode,
unsigned BitWidth) {
assert(StackSize * 8 == BitWidth && "Value does not have the correct size");
add(llvm::MCInstBuilder(Opcode)
.addReg(llvm::X86::RSP) // BaseReg
.addImm(1) // ScaleAmt
@@ -217,7 +211,7 @@ struct ConstantInliner {
}
std::vector<llvm::MCInst> popFlagAndFinalize() {
assert(StackSize * 8 == 64 && "Value does not have the correct size");
assert(StackSize * 8 == 32 && "Value does not have the correct size");
add(llvm::MCInstBuilder(llvm::X86::POPF64));
return std::move(Instructions);
}
@@ -281,46 +275,46 @@ class ExegesisX86Target : public ExegesisTarget {
}
std::vector<llvm::MCInst> setRegTo(const llvm::MCSubtargetInfo &STI,
unsigned Reg,
const llvm::APInt &Value) const override {
const llvm::APInt &Value,
unsigned Reg) const override {
if (llvm::X86::GR8RegClass.contains(Reg))
return {loadImmediate(Reg, 8, Value)};
return {loadImmediate(Reg, Value, 8)};
if (llvm::X86::GR16RegClass.contains(Reg))
return {loadImmediate(Reg, 16, Value)};
return {loadImmediate(Reg, Value, 16)};
if (llvm::X86::GR32RegClass.contains(Reg))
return {loadImmediate(Reg, 32, Value)};
return {loadImmediate(Reg, Value, 32)};
if (llvm::X86::GR64RegClass.contains(Reg))
return {loadImmediate(Reg, 64, Value)};
return {loadImmediate(Reg, Value, 64)};
ConstantInliner CI(Value);
if (llvm::X86::VR64RegClass.contains(Reg))
return CI.loadAndFinalize(Reg, 64, llvm::X86::MMX_MOVQ64rm);
return CI.loadAndFinalize(Reg, llvm::X86::MMX_MOVQ64rm, 64);
if (llvm::X86::VR128XRegClass.contains(Reg)) {
if (STI.getFeatureBits()[llvm::X86::FeatureAVX512])
return CI.loadAndFinalize(Reg, 128, llvm::X86::VMOVDQU32Z128rm);
return CI.loadAndFinalize(Reg, llvm::X86::VMOVDQU32Z128rm, 128);
if (STI.getFeatureBits()[llvm::X86::FeatureAVX])
return CI.loadAndFinalize(Reg, 128, llvm::X86::VMOVDQUrm);
return CI.loadAndFinalize(Reg, 128, llvm::X86::MOVDQUrm);
return CI.loadAndFinalize(Reg, llvm::X86::VMOVDQUrm, 128);
return CI.loadAndFinalize(Reg, llvm::X86::MOVDQUrm, 128);
}
if (llvm::X86::VR256XRegClass.contains(Reg)) {
if (STI.getFeatureBits()[llvm::X86::FeatureAVX512])
return CI.loadAndFinalize(Reg, 256, llvm::X86::VMOVDQU32Z256rm);
return CI.loadAndFinalize(Reg, llvm::X86::VMOVDQU32Z256rm, 256);
if (STI.getFeatureBits()[llvm::X86::FeatureAVX])
return CI.loadAndFinalize(Reg, 256, llvm::X86::VMOVDQUYrm);
return CI.loadAndFinalize(Reg, llvm::X86::VMOVDQUYrm, 256);
}
if (llvm::X86::VR512RegClass.contains(Reg))
if (STI.getFeatureBits()[llvm::X86::FeatureAVX512])
return CI.loadAndFinalize(Reg, 512, llvm::X86::VMOVDQU32Zrm);
return CI.loadAndFinalize(Reg, llvm::X86::VMOVDQU32Zrm, 512);
if (llvm::X86::RSTRegClass.contains(Reg)) {
if (Value.getBitWidth() == 32)
return CI.loadX87AndFinalize(Reg, 32, llvm::X86::LD_F32m);
return CI.loadX87AndFinalize(Reg, llvm::X86::LD_F32m, 32);
if (Value.getBitWidth() == 64)
return CI.loadX87AndFinalize(Reg, 64, llvm::X86::LD_F64m);
return CI.loadX87AndFinalize(Reg, llvm::X86::LD_F64m, 64);
if (Value.getBitWidth() == 80)
return CI.loadX87AndFinalize(Reg, 80, llvm::X86::LD_F80m);
return CI.loadX87AndFinalize(Reg, llvm::X86::LD_F80m, 80);
}
if (Reg == llvm::X86::EFLAGS)
return CI.popFlagAndFinalize();
return {}; // Not yet implemented.
llvm_unreachable("Not yet implemented");
}
std::unique_ptr<SnippetGenerator>

@@ -15,16 +15,11 @@ void InitializeAArch64ExegesisTarget();
namespace {
using llvm::APInt;
using llvm::MCInst;
using testing::Gt;
using testing::IsEmpty;
using testing::Not;
using testing::NotNull;
using testing::SizeIs;
constexpr const char kTriple[] = "aarch64-unknown-linux";
constexpr const char kGenericCpu[] = "generic";
constexpr const char kNoFeatures[] = "";
class AArch64TargetTest : public ::testing::Test {
protected:
@@ -34,10 +29,7 @@ protected:
std::string error;
Target_ = llvm::TargetRegistry::lookupTarget(kTriple, error);
EXPECT_THAT(Target_, NotNull());
STI_.reset(
Target_->createMCSubtargetInfo(kTriple, kGenericCpu, kNoFeatures));
}
static void SetUpTestCase() {
LLVMInitializeAArch64TargetInfo();
LLVMInitializeAArch64Target();
@@ -45,20 +37,9 @@ protected:
InitializeAArch64ExegesisTarget();
}
std::vector<MCInst> setRegTo(unsigned Reg, const APInt &Value) {
return ExegesisTarget_->setRegTo(*STI_, Reg, Value);
}
const llvm::Target *Target_;
const ExegesisTarget *const ExegesisTarget_;
std::unique_ptr<llvm::MCSubtargetInfo> STI_;
};
TEST_F(AArch64TargetTest, SetRegToConstant) {
// The AArch64 target currently doesn't know how to set register values.
const auto Insts = setRegTo(llvm::AArch64::X0, llvm::APInt());
EXPECT_THAT(Insts, Not(IsEmpty()));
}
} // namespace
} // namespace exegesis

@@ -30,11 +30,12 @@ protected:
};
TEST_F(ARMMachineFunctionGeneratorTest, DISABLED_JitFunction) {
Check({}, llvm::MCInst(), 0x1e, 0xff, 0x2f, 0xe1);
Check(ExegesisTarget::getDefault(), {}, llvm::MCInst(), 0x1e, 0xff, 0x2f,
0xe1);
}
TEST_F(ARMMachineFunctionGeneratorTest, DISABLED_JitFunctionADDrr) {
Check({{llvm::ARM::R0, llvm::APInt()}},
Check(ExegesisTarget::getDefault(), {llvm::ARM::R0},
MCInstBuilder(llvm::ARM::ADDrr)
.addReg(llvm::ARM::R0)
.addReg(llvm::ARM::R0)

@@ -32,9 +32,7 @@ protected:
const std::string &CpuName)
: TT(TT), CpuName(CpuName),
CanExecute(llvm::Triple(TT).getArch() ==
llvm::Triple(llvm::sys::getProcessTriple()).getArch()),
ET(ExegesisTarget::lookup(llvm::Triple(TT))) {
assert(ET);
llvm::Triple(llvm::sys::getProcessTriple()).getArch()) {
if (!CanExecute) {
llvm::outs() << "Skipping execution, host:"
<< llvm::sys::getProcessTriple() << ", target:" << TT
@@ -43,12 +41,12 @@ protected:
}
template <class... Bs>
inline void Check(llvm::ArrayRef<RegisterValue> RegisterInitialValues,
llvm::MCInst MCInst, Bs... Bytes) {
inline void Check(const ExegesisTarget &ET,
llvm::ArrayRef<unsigned> RegsToDef, llvm::MCInst MCInst,
Bs... Bytes) {
ExecutableFunction Function =
(MCInst.getOpcode() == 0)
? assembleToFunction(RegisterInitialValues, {})
: assembleToFunction(RegisterInitialValues, {MCInst});
(MCInst.getOpcode() == 0) ? assembleToFunction(ET, RegsToDef, {})
: assembleToFunction(ET, RegsToDef, {MCInst});
ASSERT_THAT(Function.getFunctionBytes().str(),
testing::ElementsAre(Bytes...));
if (CanExecute) {
@@ -72,12 +70,14 @@ private:
}
ExecutableFunction
assembleToFunction(llvm::ArrayRef<RegisterValue> RegisterInitialValues,
assembleToFunction(const ExegesisTarget &ET,
llvm::ArrayRef<unsigned> RegsToDef,
llvm::ArrayRef<llvm::MCInst> Instructions) {
llvm::SmallString<256> Buffer;
llvm::raw_svector_ostream AsmStream(Buffer);
assembleToStream(*ET, createTargetMachine(), /*LiveIns=*/{},
RegisterInitialValues, Instructions, AsmStream);
assembleToStream(ET, createTargetMachine(), /*LiveIns=*/{},
RegsToDef, Instructions,
AsmStream);
return ExecutableFunction(createTargetMachine(),
getObjectFromBuffer(AsmStream.str()));
}
@@ -85,7 +85,6 @@ private:
const std::string TT;
const std::string CpuName;
const bool CanExecute;
const ExegesisTarget *const ET;
};
} // namespace exegesis

@@ -39,12 +39,19 @@ protected:
};
TEST_F(X86MachineFunctionGeneratorTest, DISABLED_JitFunction) {
Check({}, llvm::MCInst(), 0xc3);
Check(ExegesisTarget::getDefault(), {}, llvm::MCInst(), 0xc3);
}
TEST_F(X86MachineFunctionGeneratorTest, DISABLED_JitFunctionXOR32rr_Default) {
Check(ExegesisTarget::getDefault(), {EAX},
MCInstBuilder(XOR32rr).addReg(EAX).addReg(EAX).addReg(EAX), 0x31, 0xc0,
0xc3);
}
TEST_F(X86MachineFunctionGeneratorTest, DISABLED_JitFunctionXOR32rr_X86) {
Check({{EAX, llvm::APInt(32, 1)}},
MCInstBuilder(XOR32rr).addReg(EAX).addReg(EAX).addReg(EAX),
const auto *ET = ExegesisTarget::lookup(llvm::Triple("x86_64-unknown-linux"));
ASSERT_NE(ET, nullptr);
Check(*ET, {EAX}, MCInstBuilder(XOR32rr).addReg(EAX).addReg(EAX).addReg(EAX),
// mov eax, 1
0xb8, 0x01, 0x00, 0x00, 0x00,
// xor eax, eax
@@ -52,13 +59,15 @@ TEST_F(X86MachineFunctionGeneratorTest, DISABLED_JitFunctionXOR32rr_X86) {
}
TEST_F(X86MachineFunctionGeneratorTest, DISABLED_JitFunctionMOV64ri) {
Check({}, MCInstBuilder(MOV64ri32).addReg(RAX).addImm(42), 0x48, 0xc7, 0xc0,
0x2a, 0x00, 0x00, 0x00, 0xc3);
Check(ExegesisTarget::getDefault(), {},
MCInstBuilder(MOV64ri32).addReg(RAX).addImm(42), 0x48, 0xc7, 0xc0, 0x2a,
0x00, 0x00, 0x00, 0xc3);
}
TEST_F(X86MachineFunctionGeneratorTest, DISABLED_JitFunctionMOV32ri) {
Check({}, MCInstBuilder(MOV32ri).addReg(EAX).addImm(42), 0xb8, 0x2a, 0x00,
0x00, 0x00, 0xc3);
Check(ExegesisTarget::getDefault(), {},
MCInstBuilder(MOV32ri).addReg(EAX).addImm(42), 0xb8, 0x2a, 0x00, 0x00,
0x00, 0xc3);
}
} // namespace

@@ -261,13 +261,7 @@ private:
using FakeSnippetGeneratorTest = SnippetGeneratorTest<FakeSnippetGenerator>;
testing::Matcher<const RegisterValue &> IsRegisterValue(unsigned Reg,
llvm::APInt Value) {
return testing::AllOf(testing::Field(&RegisterValue::Register, Reg),
testing::Field(&RegisterValue::Value, Value));
}
TEST_F(FakeSnippetGeneratorTest, ComputeRegisterInitialValuesAdd16ri) {
TEST_F(FakeSnippetGeneratorTest, ComputeRegsToDefAdd16ri) {
// ADD16ri:
// explicit def 0 : reg RegClass=GR16
// explicit use 1 : reg RegClass=GR16 | TIED_TO:0
@@ -278,11 +272,11 @@ TEST_F(FakeSnippetGeneratorTest, ComputeRegisterInitialValuesAdd16ri) {
llvm::MCOperand::createReg(llvm::X86::AX);
std::vector<InstructionBuilder> Snippet;
Snippet.push_back(std::move(IB));
const auto RIV = Generator.computeRegisterInitialValues(Snippet);
EXPECT_THAT(RIV, ElementsAre(IsRegisterValue(llvm::X86::AX, llvm::APInt())));
const auto RegsToDef = Generator.computeRegsToDef(Snippet);
EXPECT_THAT(RegsToDef, UnorderedElementsAre(llvm::X86::AX));
}
TEST_F(FakeSnippetGeneratorTest, ComputeRegisterInitialValuesAdd64rr) {
TEST_F(FakeSnippetGeneratorTest, ComputeRegsToDefAdd64rr) {
// ADD64rr:
// mov64ri rax, 42
// add64rr rax, rax, rbx
@@ -304,8 +298,8 @@ TEST_F(FakeSnippetGeneratorTest, ComputeRegisterInitialValuesAdd64rr) {
Snippet.push_back(std::move(Add));
}
const auto RIV = Generator.computeRegisterInitialValues(Snippet);
EXPECT_THAT(RIV, ElementsAre(IsRegisterValue(llvm::X86::RBX, llvm::APInt())));
const auto RegsToDef = Generator.computeRegsToDef(Snippet);
EXPECT_THAT(RegsToDef, UnorderedElementsAre(llvm::X86::RBX));
}
} // namespace

@@ -125,7 +125,7 @@ protected:
}
std::vector<MCInst> setRegTo(unsigned Reg, const APInt &Value) {
return ExegesisTarget_->setRegTo(*STI_, Reg, Value);
return ExegesisTarget_->setRegTo(*STI_, Value, Reg);
}
const llvm::Target *Target_;
@@ -137,16 +137,6 @@ using Core2TargetTest = X86TargetTest<kCpuCore2, kFeaturesEmpty>;
using Core2AvxTargetTest = X86TargetTest<kCpuCore2, kFeaturesAvx>;
using Core2Avx512TargetTest = X86TargetTest<kCpuCore2, kFeaturesAvx512VL>;
TEST_F(Core2TargetTest, SetFlags) {
const unsigned Reg = llvm::X86::EFLAGS;
EXPECT_THAT(
setRegTo(Reg, APInt(64, 0x1111222233334444ULL)),
ElementsAre(IsStackAllocate(8),
IsMovValueToStack(llvm::X86::MOV32mi, 0x33334444UL, 0),
IsMovValueToStack(llvm::X86::MOV32mi, 0x11112222UL, 4),
OpcodeIs(llvm::X86::POPF64)));
}
TEST_F(Core2TargetTest, SetRegToGR8Value) {
const uint8_t Value = 0xFFU;
const unsigned Reg = llvm::X86::AL;
@@ -295,7 +285,7 @@ TEST_F(Core2TargetTest, SetRegToST0_32Bits) {
setRegTo(llvm::X86::ST0, APInt(32, 0x11112222ULL)),
ElementsAre(IsStackAllocate(4),
IsMovValueToStack(llvm::X86::MOV32mi, 0x11112222UL, 0),
OpcodeIs(llvm::X86::LD_F32m), IsStackDeallocate(4)));
testing::A<MCInst>(), IsStackDeallocate(4)));
}
TEST_F(Core2TargetTest, SetRegToST1_32Bits) {
@@ -305,8 +295,7 @@ TEST_F(Core2TargetTest, SetRegToST1_32Bits) {
setRegTo(llvm::X86::ST1, APInt(32, 0x11112222ULL)),
ElementsAre(IsStackAllocate(4),
IsMovValueToStack(llvm::X86::MOV32mi, 0x11112222UL, 0),
OpcodeIs(llvm::X86::LD_F32m), CopySt0ToSt1,
IsStackDeallocate(4)));
testing::A<MCInst>(), CopySt0ToSt1, IsStackDeallocate(4)));
}
TEST_F(Core2TargetTest, SetRegToST0_64Bits) {
@@ -315,7 +304,7 @@ TEST_F(Core2TargetTest, SetRegToST0_64Bits) {
ElementsAre(IsStackAllocate(8),
IsMovValueToStack(llvm::X86::MOV32mi, 0x33334444UL, 0),
IsMovValueToStack(llvm::X86::MOV32mi, 0x11112222UL, 4),
OpcodeIs(llvm::X86::LD_F64m), IsStackDeallocate(8)));
testing::A<MCInst>(), IsStackDeallocate(8)));
}
TEST_F(Core2TargetTest, SetRegToST0_80Bits) {
@@ -325,7 +314,7 @@ TEST_F(Core2TargetTest, SetRegToST0_80Bits) {
IsMovValueToStack(llvm::X86::MOV32mi, 0x44445555UL, 0),
IsMovValueToStack(llvm::X86::MOV32mi, 0x22223333UL, 4),
IsMovValueToStack(llvm::X86::MOV16mi, 0x1111UL, 8),
OpcodeIs(llvm::X86::LD_F80m), IsStackDeallocate(10)));
testing::A<MCInst>(), IsStackDeallocate(10)));
}
} // namespace