[AArch64][GlobalISel] Optimize away a Not feeding a brcond by using tbz instead of tbnz.

Usually G_BRCONDs are fed by compares, but not always; when they are not,
we would previously miss this fold.

Differential Revision: https://reviews.llvm.org/D86413
This commit is contained in:
Amara Emerson 2020-08-22 23:28:07 -07:00
parent 399486642d
commit b1b6d87965
2 changed files with 91 additions and 2 deletions

View File

@@ -41,6 +1,7 @@
#define DEBUG_TYPE "aarch64-isel"
using namespace llvm;
using namespace MIPatternMatch;
namespace {
@@ -1883,7 +1884,7 @@ bool AArch64InstructionSelector::select(MachineInstr &I) {
return false;
}
const Register CondReg = I.getOperand(0).getReg();
Register CondReg = I.getOperand(0).getReg();
MachineBasicBlock *DestMBB = I.getOperand(1).getMBB();
// Speculation tracking/SLH assumes that optimized TB(N)Z/CB(N)Z
@@ -1893,7 +1894,19 @@ bool AArch64InstructionSelector::select(MachineInstr &I) {
return true;
if (ProduceNonFlagSettingCondBr) {
auto MIB = BuildMI(MBB, I, I.getDebugLoc(), TII.get(AArch64::TBNZW))
unsigned BOpc = AArch64::TBNZW;
// Try to fold a not, i.e. a xor, cond, 1.
Register XorSrc;
int64_t Cst;
if (mi_match(CondReg, MRI,
m_GTrunc(m_GXor(m_Reg(XorSrc), m_ICst(Cst)))) &&
Cst == 1) {
CondReg = XorSrc;
BOpc = AArch64::TBZW;
if (MRI.getType(XorSrc).getSizeInBits() > 32)
BOpc = AArch64::TBZX;
}
auto MIB = BuildMI(MBB, I, I.getDebugLoc(), TII.get(BOpc))
.addUse(CondReg)
.addImm(/*bit offset=*/0)
.addMBB(DestMBB);

View File

@@ -0,0 +1,76 @@
# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
# RUN: llc -mtriple=aarch64-- -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s
---
name: condbr_of_not
legalized: true
regBankSelected: true
liveins:
- { reg: '$x0' }
body: |
; CHECK-LABEL: name: condbr_of_not
; CHECK: bb.0:
; CHECK: successors: %bb.1(0x40000000), %bb.2(0x40000000)
; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0
; CHECK: [[LDRBBui:%[0-9]+]]:gpr32 = LDRBBui [[COPY]], 0 :: (load 1)
; CHECK: TBZW [[LDRBBui]], 0, %bb.2
; CHECK: bb.1:
; CHECK: RET_ReallyLR
; CHECK: bb.2:
; CHECK: RET_ReallyLR
bb.1:
successors: %bb.2, %bb.3
liveins: $x0
%0:gpr(p0) = COPY $x0
%8:gpr(s8) = G_LOAD %0(p0) :: (load 1)
%4:gpr(s32) = G_ANYEXT %8(s8)
%5:gpr(s32) = G_CONSTANT i32 1
%6:gpr(s32) = G_XOR %4, %5
%3:gpr(s1) = G_TRUNC %6(s32)
G_BRCOND %3(s1), %bb.3
bb.2:
RET_ReallyLR
bb.3:
RET_ReallyLR
...
---
name: condbr_of_not_64
legalized: true
regBankSelected: true
liveins:
- { reg: '$x0' }
body: |
; CHECK-LABEL: name: condbr_of_not_64
; CHECK: bb.0:
; CHECK: successors: %bb.1(0x40000000), %bb.2(0x40000000)
; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0
; CHECK: [[LDRBBui:%[0-9]+]]:gpr32 = LDRBBui [[COPY]], 0 :: (load 1)
; CHECK: [[SUBREG_TO_REG:%[0-9]+]]:gpr64all = SUBREG_TO_REG 0, [[LDRBBui]], %subreg.sub_32
; CHECK: [[COPY1:%[0-9]+]]:gpr64 = COPY [[SUBREG_TO_REG]]
; CHECK: TBZX [[COPY1]], 0, %bb.2
; CHECK: bb.1:
; CHECK: RET_ReallyLR
; CHECK: bb.2:
; CHECK: RET_ReallyLR
bb.1:
successors: %bb.2, %bb.3
liveins: $x0
%0:gpr(p0) = COPY $x0
%8:gpr(s8) = G_LOAD %0(p0) :: (load 1)
%4:gpr(s64) = G_ANYEXT %8(s8)
%5:gpr(s64) = G_CONSTANT i64 1
%6:gpr(s64) = G_XOR %4, %5
%3:gpr(s1) = G_TRUNC %6(s64)
G_BRCOND %3(s1), %bb.3
bb.2:
RET_ReallyLR
bb.3:
RET_ReallyLR
...