2014-04-30 15:31:33 +00:00
|
|
|
//===-- SILowerI1Copies.cpp - Lower I1 Copies -----------------------------===//
|
|
|
|
//
|
|
|
|
// The LLVM Compiler Infrastructure
|
|
|
|
//
|
|
|
|
// This file is distributed under the University of Illinois Open Source
|
|
|
|
// License. See LICENSE.TXT for details.
|
|
|
|
//
|
|
|
|
/// i1 values are usually inserted by the CFG Structurize pass and they are
|
|
|
|
/// unique in that they can be copied from VALU to SALU registers.
|
|
|
|
/// This is not possible for any other value type. Since there are no
|
|
|
|
/// MOV instructions for i1, we need to use V_CMP_* and V_CNDMASK to move the i1.
|
|
|
|
///
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
//
|
|
|
|
|
|
|
|
#define DEBUG_TYPE "si-i1-copies"
|
|
|
|
#include "AMDGPU.h"
|
2014-08-04 21:25:23 +00:00
|
|
|
#include "AMDGPUSubtarget.h"
|
2014-04-30 15:31:33 +00:00
|
|
|
#include "SIInstrInfo.h"
|
|
|
|
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
|
|
|
|
#include "llvm/CodeGen/MachineFunctionPass.h"
|
|
|
|
#include "llvm/CodeGen/MachineInstrBuilder.h"
|
|
|
|
#include "llvm/CodeGen/MachineRegisterInfo.h"
|
|
|
|
#include "llvm/IR/Function.h"
|
2017-06-06 11:49:48 +00:00
|
|
|
#include "llvm/IR/LLVMContext.h"
|
2014-04-30 15:31:33 +00:00
|
|
|
#include "llvm/Support/Debug.h"
|
|
|
|
#include "llvm/Target/TargetMachine.h"
|
|
|
|
|
|
|
|
using namespace llvm;
|
|
|
|
|
|
|
|
namespace {
|
|
|
|
|
|
|
|
class SILowerI1Copies : public MachineFunctionPass {
|
|
|
|
public:
|
|
|
|
static char ID;
|
|
|
|
|
|
|
|
public:
|
|
|
|
SILowerI1Copies() : MachineFunctionPass(ID) {
|
|
|
|
initializeSILowerI1CopiesPass(*PassRegistry::getPassRegistry());
|
|
|
|
}
|
|
|
|
|
2014-08-30 16:48:34 +00:00
|
|
|
bool runOnMachineFunction(MachineFunction &MF) override;
|
2014-04-30 15:31:33 +00:00
|
|
|
|
2016-10-01 02:56:57 +00:00
|
|
|
StringRef getPassName() const override { return "SI Lower i1 Copies"; }
|
2014-04-30 15:31:33 +00:00
|
|
|
|
2014-08-30 16:48:34 +00:00
|
|
|
void getAnalysisUsage(AnalysisUsage &AU) const override {
|
2014-04-30 15:31:33 +00:00
|
|
|
AU.setPreservesCFG();
|
|
|
|
MachineFunctionPass::getAnalysisUsage(AU);
|
|
|
|
}
|
|
|
|
};
|
|
|
|
|
|
|
|
} // End anonymous namespace.
|
|
|
|
|
2016-02-11 06:15:34 +00:00
|
|
|
// Register the pass with LLVM's pass registry under the DEBUG_TYPE
// ("si-i1-copies") command-line name. The trailing flags are the standard
// INITIALIZE_PASS cfg-only / is-analysis arguments, both false here.
INITIALIZE_PASS(SILowerI1Copies, DEBUG_TYPE,
                "SI Lower i1 Copies", false, false)

// Definition of the static pass ID declared in the class.
char SILowerI1Copies::ID = 0;

// Externally visible handle to the pass ID so the target pass pipeline can
// reference this pass.
char &llvm::SILowerI1CopiesID = SILowerI1Copies::ID;
|
|
|
|
|
|
|
|
// Factory hook used by the AMDGPU target machine; the returned pass is
// owned (and eventually deleted) by the pass manager.
FunctionPass *llvm::createSILowerI1CopiesPass() {
  FunctionPass *LoweringPass = new SILowerI1Copies();
  return LoweringPass;
}
|
|
|
|
|
|
|
|
bool SILowerI1Copies::runOnMachineFunction(MachineFunction &MF) {
  MachineRegisterInfo &MRI = MF.getRegInfo();
  const SISubtarget &ST = MF.getSubtarget<SISubtarget>();
  const SIInstrInfo *TII = ST.getInstrInfo();
  const TargetRegisterInfo *TRI = &TII->getRegisterInfo();

  // Registers that end up holding an i1 materialized into a VGPR; their
  // register class is fixed up to VGPR_32 after the main rewrite loop.
  std::vector<unsigned> I1Defs;

  for (MachineFunction::iterator BI = MF.begin(), BE = MF.end();
       BI != BE; ++BI) {
    MachineBasicBlock &MBB = *BI;
    MachineBasicBlock::iterator I, Next;
    // Cache the successor iterator up front: the current instruction may be
    // erased below, which would invalidate I.
    for (I = MBB.begin(); I != MBB.end(); I = Next) {
      Next = std::next(I);
      MachineInstr &MI = *I;

      // An IMPLICIT_DEF of an i1 can simply live in an SGPR pair.
      if (MI.getOpcode() == AMDGPU::IMPLICIT_DEF) {
        unsigned Reg = MI.getOperand(0).getReg();
        const TargetRegisterClass *RC = MRI.getRegClass(Reg);
        if (RC == &AMDGPU::VReg_1RegClass)
          MRI.setRegClass(Reg, &AMDGPU::SReg_64RegClass);
        continue;
      }

      if (MI.getOpcode() != AMDGPU::COPY)
        continue;

      const MachineOperand &Dst = MI.getOperand(0);
      const MachineOperand &Src = MI.getOperand(1);

      // Only virtual-to-virtual copies are rewritten here.
      if (!TargetRegisterInfo::isVirtualRegister(Src.getReg()) ||
          !TargetRegisterInfo::isVirtualRegister(Dst.getReg()))
        continue;

      const TargetRegisterClass *DstRC = MRI.getRegClass(Dst.getReg());
      const TargetRegisterClass *SrcRC = MRI.getRegClass(Src.getReg());

      DebugLoc DL = MI.getDebugLoc();
      // NOTE(review): assumes SSA-form machine IR, so every virtual register
      // has a unique def and getUniqueVRegDef does not return null here.
      MachineInstr *DefInst = MRI.getUniqueVRegDef(Src.getReg());
      if (DstRC == &AMDGPU::VReg_1RegClass &&
          TRI->getCommonSubClass(SrcRC, &AMDGPU::SGPR_64RegClass)) {
        // SALU -> VALU copy of an i1.
        I1Defs.push_back(Dst.getReg());

        // If the source is a constant (S_MOV_B64 0 / -1), materialize the
        // value directly with a V_MOV instead of a conditional select.
        if (DefInst->getOpcode() == AMDGPU::S_MOV_B64) {
          if (DefInst->getOperand(1).isImm()) {
            // (Dst.getReg() was already recorded in I1Defs above; the
            //  previous duplicate push_back here has been removed.)
            int64_t Val = DefInst->getOperand(1).getImm();
            assert(Val == 0 || Val == -1);

            BuildMI(MBB, &MI, DL, TII->get(AMDGPU::V_MOV_B32_e32))
                .add(Dst)
                .addImm(Val);
            MI.eraseFromParent();
            continue;
          }
        }

        // General case: select 0/-1 into the VGPR based on the SGPR mask.
        BuildMI(MBB, &MI, DL, TII->get(AMDGPU::V_CNDMASK_B32_e64))
            .add(Dst)
            .addImm(0)
            .addImm(-1)
            .add(Src);
        MI.eraseFromParent();
      } else if (TRI->getCommonSubClass(DstRC, &AMDGPU::SGPR_64RegClass) &&
                 SrcRC == &AMDGPU::VReg_1RegClass) {
        // VALU -> SALU copy of an i1.
        //
        // If the VGPR i1 was itself produced by "V_CNDMASK 0, non-zero, sgpr"
        // we can skip the compare and just mask the original SGPR value with
        // EXEC; otherwise compare the VGPR against zero to rebuild the mask.
        if (DefInst->getOpcode() == AMDGPU::V_CNDMASK_B32_e64 &&
            DefInst->getOperand(1).isImm() && DefInst->getOperand(2).isImm() &&
            DefInst->getOperand(1).getImm() == 0 &&
            DefInst->getOperand(2).getImm() != 0 &&
            DefInst->getOperand(3).isReg() &&
            TargetRegisterInfo::isVirtualRegister(
                DefInst->getOperand(3).getReg()) &&
            TRI->getCommonSubClass(
                MRI.getRegClass(DefInst->getOperand(3).getReg()),
                &AMDGPU::SGPR_64RegClass)) {
          BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_AND_B64))
              .add(Dst)
              .addReg(AMDGPU::EXEC)
              .add(DefInst->getOperand(3));
        } else {
          BuildMI(MBB, &MI, DL, TII->get(AMDGPU::V_CMP_NE_U32_e64))
              .add(Dst)
              .add(Src)
              .addImm(0);
        }
        MI.eraseFromParent();
      }
    }
  }

  // All registers that now hold a materialized 0/-1 value live in VGPRs.
  for (unsigned Reg : I1Defs)
    MRI.setRegClass(Reg, &AMDGPU::VGPR_32RegClass);

  // The IR was rewritten in place; no analyses beyond what getAnalysisUsage
  // declares are invalidated, so report "no change" to the legacy PM.
  return false;
}
|