AMDGPU/GlobalISel: Select wqm, softwqm and wwm intrinsics
parent 2c13fd38f3
commit 2e152027b1
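
The wqm, softwqm and wwm intrinsics are copy-like: each takes a single value and returns it unchanged, serving only to mark the value for whole-quad-mode or whole-wave-mode handling later in the backend. A minimal IR sketch of what the new selection path covers (the concrete types and resulting MIR are exercised by the tests added below):

  %wqm  = call float @llvm.amdgcn.wqm.f32(float %val)
  %soft = call float @llvm.amdgcn.softwqm.f32(float %val)
  %wwm  = call float @llvm.amdgcn.wwm.f32(float %val)

Each call is selected to the corresponding WQM, SOFT_WQM or WWM pseudo with an implicit $exec operand appended, via the new constrainCopyLikeIntrin() helper.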
@@ -88,6 +88,30 @@ bool AMDGPUInstructionSelector::isVCC(Register Reg,
  return RB->getID() == AMDGPU::VCCRegBankID;
}

bool AMDGPUInstructionSelector::constrainCopyLikeIntrin(MachineInstr &MI,
                                                        unsigned NewOpc) const {
  MI.setDesc(TII.get(NewOpc));
  MI.RemoveOperand(1); // Remove intrinsic ID.
  MI.addOperand(*MF, MachineOperand::CreateReg(AMDGPU::EXEC, false, true));

  MachineOperand &Dst = MI.getOperand(0);
  MachineOperand &Src = MI.getOperand(1);

  // TODO: This should be legalized to s32 if needed
  if (MRI->getType(Dst.getReg()) == LLT::scalar(1))
    return false;

  const TargetRegisterClass *DstRC
    = TRI.getConstrainedRegClassForOperand(Dst, *MRI);
  const TargetRegisterClass *SrcRC
    = TRI.getConstrainedRegClassForOperand(Src, *MRI);
  if (!DstRC || DstRC != SrcRC)
    return false;

  return RBI.constrainGenericRegister(Dst.getReg(), *DstRC, *MRI) &&
         RBI.constrainGenericRegister(Src.getReg(), *SrcRC, *MRI);
}

bool AMDGPUInstructionSelector::selectCOPY(MachineInstr &I) const {
  const DebugLoc &DL = I.getDebugLoc();
  MachineBasicBlock *BB = I.getParent();
@@ -706,6 +730,12 @@ bool AMDGPUInstructionSelector::selectG_INTRINSIC(MachineInstr &I) const {
  }
  case Intrinsic::amdgcn_interp_p1_f16:
    return selectInterpP1F16(I);
  case Intrinsic::amdgcn_wqm:
    return constrainCopyLikeIntrin(I, AMDGPU::WQM);
  case Intrinsic::amdgcn_softwqm:
    return constrainCopyLikeIntrin(I, AMDGPU::SOFT_WQM);
  case Intrinsic::amdgcn_wwm:
    return constrainCopyLikeIntrin(I, AMDGPU::WWM);
  default:
    return selectImpl(I, *CoverageInfo);
  }
@@ -80,6 +80,8 @@ private:
  MachineOperand getSubOperand64(MachineOperand &MO,
                                 const TargetRegisterClass &SubRC,
                                 unsigned SubIdx) const;

  bool constrainCopyLikeIntrin(MachineInstr &MI, unsigned NewOpc) const;
  bool selectCOPY(MachineInstr &I) const;
  bool selectPHI(MachineInstr &I) const;
  bool selectG_TRUNC(MachineInstr &I) const;
@@ -3096,8 +3096,6 @@ AMDGPURegisterBankInfo::getInstrMapping(const MachineInstr &MI) const {
  case Intrinsic::amdgcn_udot4:
  case Intrinsic::amdgcn_sdot8:
  case Intrinsic::amdgcn_udot8:
  case Intrinsic::amdgcn_wwm:
  case Intrinsic::amdgcn_wqm:
    return getDefaultMappingVOP(MI);
  case Intrinsic::amdgcn_ds_swizzle:
  case Intrinsic::amdgcn_ds_permute:
@@ -3105,6 +3103,9 @@ AMDGPURegisterBankInfo::getInstrMapping(const MachineInstr &MI) const {
  case Intrinsic::amdgcn_update_dpp:
  case Intrinsic::amdgcn_mov_dpp8:
  case Intrinsic::amdgcn_mov_dpp:
  case Intrinsic::amdgcn_wwm:
  case Intrinsic::amdgcn_wqm:
  case Intrinsic::amdgcn_softwqm:
    return getDefaultMappingAllVGPR(MI);
  case Intrinsic::amdgcn_kernarg_segment_ptr:
  case Intrinsic::amdgcn_s_getpc:
test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.softwqm.ll (new file, 82 lines)
@@ -0,0 +1,82 @@
; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
; RUN: llc -global-isel -march=amdgcn -mcpu=hawaii -stop-after=instruction-select -verify-machineinstrs < %s | FileCheck -check-prefix=GCN %s

define amdgpu_ps float @softwqm_f32(float %val) {
  ; GCN-LABEL: name: softwqm_f32
  ; GCN: bb.1 (%ir-block.0):
  ; GCN: liveins: $vgpr0
  ; GCN: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
  ; GCN: [[SOFT_WQM:%[0-9]+]]:vgpr_32 = SOFT_WQM [[COPY]], implicit $exec
  ; GCN: $vgpr0 = COPY [[SOFT_WQM]]
  ; GCN: SI_RETURN_TO_EPILOG implicit $vgpr0
  %ret = call float @llvm.amdgcn.softwqm.f32(float %val)
  ret float %ret
}

define amdgpu_ps float @softwqm_v2f16(float %arg) {
  ; GCN-LABEL: name: softwqm_v2f16
  ; GCN: bb.1 (%ir-block.0):
  ; GCN: liveins: $vgpr0
  ; GCN: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
  ; GCN: [[SOFT_WQM:%[0-9]+]]:vgpr_32 = SOFT_WQM [[COPY]], implicit $exec
  ; GCN: $vgpr0 = COPY [[SOFT_WQM]]
  ; GCN: SI_RETURN_TO_EPILOG implicit $vgpr0
  %val = bitcast float %arg to <2 x half>
  %ret = call <2 x half> @llvm.amdgcn.softwqm.v2f16(<2 x half> %val)
  %bc = bitcast <2 x half> %ret to float
  ret float %bc
}

define amdgpu_ps <2 x float> @softwqm_f64(double %val) {
  ; GCN-LABEL: name: softwqm_f64
  ; GCN: bb.1 (%ir-block.0):
  ; GCN: liveins: $vgpr0, $vgpr1
  ; GCN: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
  ; GCN: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
  ; GCN: [[REG_SEQUENCE:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1
  ; GCN: [[SOFT_WQM:%[0-9]+]]:vreg_64 = SOFT_WQM [[REG_SEQUENCE]], implicit $exec
  ; GCN: [[COPY2:%[0-9]+]]:vgpr_32 = COPY [[SOFT_WQM]].sub0
  ; GCN: [[COPY3:%[0-9]+]]:vgpr_32 = COPY [[SOFT_WQM]].sub1
  ; GCN: $vgpr0 = COPY [[COPY2]]
  ; GCN: $vgpr1 = COPY [[COPY3]]
  ; GCN: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1
  %ret = call double @llvm.amdgcn.softwqm.f64(double %val)
  %bitcast = bitcast double %ret to <2 x float>
  ret <2 x float> %bitcast
}

; TODO
; define amdgpu_ps float @softwqm_i1_vcc(float %val) {
; %vcc = fcmp oeq float %val, 0.0
; %ret = call i1 @llvm.amdgcn.softwqm.i1(i1 %vcc)
; %select = select i1 %ret, float 1.0, float 0.0
; ret float %select
; }

define amdgpu_ps <3 x float> @softwqm_v3f32(<3 x float> %val) {
  ; GCN-LABEL: name: softwqm_v3f32
  ; GCN: bb.1 (%ir-block.0):
  ; GCN: liveins: $vgpr0, $vgpr1, $vgpr2
  ; GCN: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
  ; GCN: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
  ; GCN: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2
  ; GCN: [[REG_SEQUENCE:%[0-9]+]]:vreg_96 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2
  ; GCN: [[SOFT_WQM:%[0-9]+]]:vreg_96 = SOFT_WQM [[REG_SEQUENCE]], implicit $exec
  ; GCN: [[COPY3:%[0-9]+]]:vgpr_32 = COPY [[SOFT_WQM]].sub0
  ; GCN: [[COPY4:%[0-9]+]]:vgpr_32 = COPY [[SOFT_WQM]].sub1
  ; GCN: [[COPY5:%[0-9]+]]:vgpr_32 = COPY [[SOFT_WQM]].sub2
  ; GCN: $vgpr0 = COPY [[COPY3]]
  ; GCN: $vgpr1 = COPY [[COPY4]]
  ; GCN: $vgpr2 = COPY [[COPY5]]
  ; GCN: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2
  %ret = call <3 x float> @llvm.amdgcn.softwqm.v3f32(<3 x float> %val)
  ret <3 x float> %ret
}

declare i1 @llvm.amdgcn.softwqm.i1(i1) #0
declare float @llvm.amdgcn.softwqm.f32(float) #0
declare <2 x half> @llvm.amdgcn.softwqm.v2f16(<2 x half>) #0
declare <3 x float> @llvm.amdgcn.softwqm.v3f32(<3 x float>) #0
declare double @llvm.amdgcn.softwqm.f64(double) #0

attributes #0 = { nounwind readnone speculatable }
test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.wqm.ll (new file, 82 lines)
@@ -0,0 +1,82 @@
; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
; RUN: llc -global-isel -march=amdgcn -mcpu=hawaii -stop-after=instruction-select -verify-machineinstrs < %s | FileCheck -check-prefix=GCN %s

define amdgpu_ps float @wqm_f32(float %val) {
  ; GCN-LABEL: name: wqm_f32
  ; GCN: bb.1 (%ir-block.0):
  ; GCN: liveins: $vgpr0
  ; GCN: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
  ; GCN: [[WQM:%[0-9]+]]:vgpr_32 = WQM [[COPY]], implicit $exec
  ; GCN: $vgpr0 = COPY [[WQM]]
  ; GCN: SI_RETURN_TO_EPILOG implicit $vgpr0
  %ret = call float @llvm.amdgcn.wqm.f32(float %val)
  ret float %ret
}

define amdgpu_ps float @wqm_v2f16(float %arg) {
  ; GCN-LABEL: name: wqm_v2f16
  ; GCN: bb.1 (%ir-block.0):
  ; GCN: liveins: $vgpr0
  ; GCN: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
  ; GCN: [[WQM:%[0-9]+]]:vgpr_32 = WQM [[COPY]], implicit $exec
  ; GCN: $vgpr0 = COPY [[WQM]]
  ; GCN: SI_RETURN_TO_EPILOG implicit $vgpr0
  %val = bitcast float %arg to <2 x half>
  %ret = call <2 x half> @llvm.amdgcn.wqm.v2f16(<2 x half> %val)
  %bc = bitcast <2 x half> %ret to float
  ret float %bc
}

define amdgpu_ps <2 x float> @wqm_f64(double %val) {
  ; GCN-LABEL: name: wqm_f64
  ; GCN: bb.1 (%ir-block.0):
  ; GCN: liveins: $vgpr0, $vgpr1
  ; GCN: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
  ; GCN: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
  ; GCN: [[REG_SEQUENCE:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1
  ; GCN: [[WQM:%[0-9]+]]:vreg_64 = WQM [[REG_SEQUENCE]], implicit $exec
  ; GCN: [[COPY2:%[0-9]+]]:vgpr_32 = COPY [[WQM]].sub0
  ; GCN: [[COPY3:%[0-9]+]]:vgpr_32 = COPY [[WQM]].sub1
  ; GCN: $vgpr0 = COPY [[COPY2]]
  ; GCN: $vgpr1 = COPY [[COPY3]]
  ; GCN: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1
  %ret = call double @llvm.amdgcn.wqm.f64(double %val)
  %bitcast = bitcast double %ret to <2 x float>
  ret <2 x float> %bitcast
}

; TODO
; define amdgpu_ps float @wqm_i1_vcc(float %val) {
; %vcc = fcmp oeq float %val, 0.0
; %ret = call i1 @llvm.amdgcn.wqm.i1(i1 %vcc)
; %select = select i1 %ret, float 1.0, float 0.0
; ret float %select
; }

define amdgpu_ps <3 x float> @wqm_v3f32(<3 x float> %val) {
  ; GCN-LABEL: name: wqm_v3f32
  ; GCN: bb.1 (%ir-block.0):
  ; GCN: liveins: $vgpr0, $vgpr1, $vgpr2
  ; GCN: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
  ; GCN: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
  ; GCN: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2
  ; GCN: [[REG_SEQUENCE:%[0-9]+]]:vreg_96 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2
  ; GCN: [[WQM:%[0-9]+]]:vreg_96 = WQM [[REG_SEQUENCE]], implicit $exec
  ; GCN: [[COPY3:%[0-9]+]]:vgpr_32 = COPY [[WQM]].sub0
  ; GCN: [[COPY4:%[0-9]+]]:vgpr_32 = COPY [[WQM]].sub1
  ; GCN: [[COPY5:%[0-9]+]]:vgpr_32 = COPY [[WQM]].sub2
  ; GCN: $vgpr0 = COPY [[COPY3]]
  ; GCN: $vgpr1 = COPY [[COPY4]]
  ; GCN: $vgpr2 = COPY [[COPY5]]
  ; GCN: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2
  %ret = call <3 x float> @llvm.amdgcn.wqm.v3f32(<3 x float> %val)
  ret <3 x float> %ret
}

declare i1 @llvm.amdgcn.wqm.i1(i1) #0
declare float @llvm.amdgcn.wqm.f32(float) #0
declare <2 x half> @llvm.amdgcn.wqm.v2f16(<2 x half>) #0
declare <3 x float> @llvm.amdgcn.wqm.v3f32(<3 x float>) #0
declare double @llvm.amdgcn.wqm.f64(double) #0

attributes #0 = { nounwind readnone speculatable }
test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.wwm.ll (new file, 82 lines)
@@ -0,0 +1,82 @@
; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
; RUN: llc -global-isel -march=amdgcn -mcpu=hawaii -stop-after=instruction-select -verify-machineinstrs < %s | FileCheck -check-prefix=GCN %s

define amdgpu_ps float @wwm_f32(float %val) {
  ; GCN-LABEL: name: wwm_f32
  ; GCN: bb.1 (%ir-block.0):
  ; GCN: liveins: $vgpr0
  ; GCN: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
  ; GCN: [[WWM:%[0-9]+]]:vgpr_32 = WWM [[COPY]], implicit $exec
  ; GCN: $vgpr0 = COPY [[WWM]]
  ; GCN: SI_RETURN_TO_EPILOG implicit $vgpr0
  %ret = call float @llvm.amdgcn.wwm.f32(float %val)
  ret float %ret
}

define amdgpu_ps float @wwm_v2f16(float %arg) {
  ; GCN-LABEL: name: wwm_v2f16
  ; GCN: bb.1 (%ir-block.0):
  ; GCN: liveins: $vgpr0
  ; GCN: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
  ; GCN: [[WWM:%[0-9]+]]:vgpr_32 = WWM [[COPY]], implicit $exec
  ; GCN: $vgpr0 = COPY [[WWM]]
  ; GCN: SI_RETURN_TO_EPILOG implicit $vgpr0
  %val = bitcast float %arg to <2 x half>
  %ret = call <2 x half> @llvm.amdgcn.wwm.v2f16(<2 x half> %val)
  %bc = bitcast <2 x half> %ret to float
  ret float %bc
}

define amdgpu_ps <2 x float> @wwm_f64(double %val) {
  ; GCN-LABEL: name: wwm_f64
  ; GCN: bb.1 (%ir-block.0):
  ; GCN: liveins: $vgpr0, $vgpr1
  ; GCN: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
  ; GCN: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
  ; GCN: [[REG_SEQUENCE:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1
  ; GCN: [[WWM:%[0-9]+]]:vreg_64 = WWM [[REG_SEQUENCE]], implicit $exec
  ; GCN: [[COPY2:%[0-9]+]]:vgpr_32 = COPY [[WWM]].sub0
  ; GCN: [[COPY3:%[0-9]+]]:vgpr_32 = COPY [[WWM]].sub1
  ; GCN: $vgpr0 = COPY [[COPY2]]
  ; GCN: $vgpr1 = COPY [[COPY3]]
  ; GCN: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1
  %ret = call double @llvm.amdgcn.wwm.f64(double %val)
  %bitcast = bitcast double %ret to <2 x float>
  ret <2 x float> %bitcast
}

; TODO
; define amdgpu_ps float @wwm_i1_vcc(float %val) {
; %vcc = fcmp oeq float %val, 0.0
; %ret = call i1 @llvm.amdgcn.wwm.i1(i1 %vcc)
; %select = select i1 %ret, float 1.0, float 0.0
; ret float %select
; }

define amdgpu_ps <3 x float> @wwm_v3f32(<3 x float> %val) {
  ; GCN-LABEL: name: wwm_v3f32
  ; GCN: bb.1 (%ir-block.0):
  ; GCN: liveins: $vgpr0, $vgpr1, $vgpr2
  ; GCN: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
  ; GCN: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
  ; GCN: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2
  ; GCN: [[REG_SEQUENCE:%[0-9]+]]:vreg_96 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2
  ; GCN: [[WWM:%[0-9]+]]:vreg_96 = WWM [[REG_SEQUENCE]], implicit $exec
  ; GCN: [[COPY3:%[0-9]+]]:vgpr_32 = COPY [[WWM]].sub0
  ; GCN: [[COPY4:%[0-9]+]]:vgpr_32 = COPY [[WWM]].sub1
  ; GCN: [[COPY5:%[0-9]+]]:vgpr_32 = COPY [[WWM]].sub2
  ; GCN: $vgpr0 = COPY [[COPY3]]
  ; GCN: $vgpr1 = COPY [[COPY4]]
  ; GCN: $vgpr2 = COPY [[COPY5]]
  ; GCN: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2
  %ret = call <3 x float> @llvm.amdgcn.wwm.v3f32(<3 x float> %val)
  ret <3 x float> %ret
}

declare i1 @llvm.amdgcn.wwm.i1(i1) #0
declare float @llvm.amdgcn.wwm.f32(float) #0
declare <2 x half> @llvm.amdgcn.wwm.v2f16(<2 x half>) #0
declare <3 x float> @llvm.amdgcn.wwm.v3f32(<3 x float>) #0
declare double @llvm.amdgcn.wwm.f64(double) #0

attributes #0 = { nounwind readnone speculatable }
@@ -11,7 +11,8 @@ body: |
    liveins: $sgpr0
    ; CHECK-LABEL: name: wqm_s
    ; CHECK: [[COPY:%[0-9]+]]:sgpr(s32) = COPY $sgpr0
    ; CHECK: [[INT:%[0-9]+]]:vgpr(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.wqm), [[COPY]](s32)
    ; CHECK: [[COPY1:%[0-9]+]]:vgpr(s32) = COPY [[COPY]](s32)
    ; CHECK: [[INT:%[0-9]+]]:vgpr(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.wqm), [[COPY1]](s32)
    %0:_(s32) = COPY $sgpr0
    %1:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.wqm), %0
...
@@ -11,7 +11,8 @@ body: |
    liveins: $sgpr0
    ; CHECK-LABEL: name: wwm_s
    ; CHECK: [[COPY:%[0-9]+]]:sgpr(s32) = COPY $sgpr0
    ; CHECK: [[INT:%[0-9]+]]:vgpr(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.wwm), [[COPY]](s32)
    ; CHECK: [[COPY1:%[0-9]+]]:vgpr(s32) = COPY [[COPY]](s32)
    ; CHECK: [[INT:%[0-9]+]]:vgpr(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.wwm), [[COPY1]](s32)
    %0:_(s32) = COPY $sgpr0
    %1:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.wwm), %0
...