[GlobalISel] combine G_TRUNC with G_MERGE_VALUES

Summary:
Truncating the result of a merge means that, most likely, we could have done without the merge in the first place and just used the merge inputs directly. This can be done in three cases:

1. If the truncation result is smaller than the merge source, we can truncate the merge source directly.
2. If the sizes are the same, we can replace the register or use a copy.
3. If the truncation size is a multiple of the merge source size, we can build a smaller merge.

This gets rid of most of the larger, hard-to-legalize merges.
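
For illustration, a minimal MIR sketch of the three cases (register numbers, types, and values are hypothetical, chosen to mirror the tests added in this commit):

; Case 1: trunc narrower than the merge source -> truncate the first source directly.
;   %2:_(s64) = G_MERGE_VALUES %0:_(s32), %1:_(s32)
;   %3:_(s16) = G_TRUNC %2
; becomes
;   %3:_(s16) = G_TRUNC %0
;
; Case 2: trunc size equal to the merge source size -> reuse the source register.
;   %2:_(s64) = G_MERGE_VALUES %0:_(s32), %1:_(s32)
;   %3:_(s32) = G_TRUNC %2
; becomes
;   uses of %3 are rewritten to %0 (or %3 = COPY %0 where a plain replacement is not possible)
;
; Case 3: trunc size a multiple of the merge source size -> build a smaller merge.
;   %4:_(s128) = G_MERGE_VALUES %0:_(s32), %1:_(s32), %2:_(s32), %3:_(s32)
;   %5:_(s64) = G_TRUNC %4
; becomes
;   %5:_(s64) = G_MERGE_VALUES %0, %1

Cases 1 and 3 only fire if the resulting narrower G_TRUNC or smaller G_MERGE_VALUES is not unsupported for the target; otherwise the original merge is left alone.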

Reviewers: qcolombet, aditya_nandakumar, aemerson, paquette, arsenm, Petar.Avramovic

Reviewed By: arsenm

Subscribers: sdardis, jvesely, wdng, nhaehnle, rovka, jrtc27, atanasyan, kerbowa, llvm-commits

Tags: #llvm

Differential Revision: https://reviews.llvm.org/D75915
Dominik Montada 2020-03-09 17:00:38 +01:00
parent 6ce69c69c6
commit 7d30aae116
15 changed files with 1156 additions and 1154 deletions

@@ -167,7 +167,8 @@ public:
bool tryCombineTrunc(MachineInstr &MI,
SmallVectorImpl<MachineInstr *> &DeadInsts,
SmallVectorImpl<Register> &UpdatedDefs) {
SmallVectorImpl<Register> &UpdatedDefs,
GISelObserverWrapper &Observer) {
assert(MI.getOpcode() == TargetOpcode::G_TRUNC);
Builder.setInstr(MI);
@@ -189,6 +190,66 @@ public:
}
}
// Try to fold trunc(merge) to directly use the source of the merge.
// This gets rid of large, difficult to legalize, merges
if (SrcMI->getOpcode() == TargetOpcode::G_MERGE_VALUES) {
const Register MergeSrcReg = SrcMI->getOperand(1).getReg();
const LLT MergeSrcTy = MRI.getType(MergeSrcReg);
const LLT DstTy = MRI.getType(DstReg);
// We can only fold if the types are scalar
const unsigned DstSize = DstTy.getSizeInBits();
const unsigned MergeSrcSize = MergeSrcTy.getSizeInBits();
if (!DstTy.isScalar() || !MergeSrcTy.isScalar())
return false;
if (DstSize < MergeSrcSize) {
// When the merge source is larger than the destination, we can just
// truncate the merge source directly
if (isInstUnsupported({TargetOpcode::G_TRUNC, {DstTy, MergeSrcTy}}))
return false;
LLVM_DEBUG(dbgs() << "Combining G_TRUNC(G_MERGE_VALUES) to G_TRUNC: "
<< MI);
Builder.buildTrunc(DstReg, MergeSrcReg);
UpdatedDefs.push_back(DstReg);
} else if (DstSize == MergeSrcSize) {
// If the sizes match we can simply try to replace the register
LLVM_DEBUG(
dbgs() << "Replacing G_TRUNC(G_MERGE_VALUES) with merge input: "
<< MI);
replaceRegOrBuildCopy(DstReg, MergeSrcReg, MRI, Builder, UpdatedDefs,
Observer);
} else if (DstSize % MergeSrcSize == 0) {
// If the trunc size is a multiple of the merge source size we can use
// a smaller merge instead
if (isInstUnsupported(
{TargetOpcode::G_MERGE_VALUES, {DstTy, MergeSrcTy}}))
return false;
LLVM_DEBUG(
dbgs() << "Combining G_TRUNC(G_MERGE_VALUES) to G_MERGE_VALUES: "
<< MI);
const unsigned NumSrcs = DstSize / MergeSrcSize;
assert(NumSrcs < SrcMI->getNumOperands() - 1 &&
"trunc(merge) should require less inputs than merge");
SmallVector<Register, 2> SrcRegs(NumSrcs);
for (unsigned i = 0; i < NumSrcs; ++i)
SrcRegs[i] = SrcMI->getOperand(i + 1).getReg();
Builder.buildMerge(DstReg, SrcRegs);
UpdatedDefs.push_back(DstReg);
} else {
// Unable to combine
return false;
}
markInstAndDefDead(MI, *SrcMI, DeadInsts);
return true;
}
return false;
}
@@ -533,7 +594,7 @@ public:
Changed = tryCombineExtract(MI, DeadInsts, UpdatedDefs);
break;
case TargetOpcode::G_TRUNC:
Changed = tryCombineTrunc(MI, DeadInsts, UpdatedDefs);
Changed = tryCombineTrunc(MI, DeadInsts, UpdatedDefs, WrapperObserver);
if (!Changed) {
// Try to combine truncates away even if they are legal. As all artifact
// combines at the moment look only "up" the def-use chains, we achieve

@@ -8,9 +8,7 @@ body: |
; CHECK-LABEL: name: test_implicit_def
; CHECK: [[DEF:%[0-9]+]]:_(s64) = G_IMPLICIT_DEF
; CHECK: [[MV:%[0-9]+]]:_(s128) = G_MERGE_VALUES [[DEF]](s64), [[DEF]](s64)
; CHECK: [[TRUNC:%[0-9]+]]:_(s64) = G_TRUNC [[MV]](s128)
; CHECK: $x0 = COPY [[TRUNC]](s64)
; CHECK: $x0 = COPY [[DEF]](s64)
%0:_(s128) = G_IMPLICIT_DEF
%1:_(s64) = G_TRUNC %0(s128)
$x0 = COPY %1(s64)

@@ -0,0 +1,136 @@
# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
# RUN: llc -O0 -mtriple=amdgcn-mesa-mesa3d -mcpu=tahiti -run-pass=legalizer %s -o - | FileCheck %s
---
name: trunc_s16_merge_s64_s32
body: |
bb.0:
; Test that trunc(merge) with trunc-size < merge-source-size creates a trunc
; of the merge source
; CHECK-LABEL: name: trunc_s16_merge_s64_s32
; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
; CHECK: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
; CHECK: $vgpr0 = COPY [[C]](s32)
%0:_(s32) = G_CONSTANT i32 0
%1:_(s32) = G_CONSTANT i32 1
%2:_(s64) = G_MERGE_VALUES %0, %1
%3:_(s16) = G_TRUNC %2
%4:_(s32) = G_ANYEXT %3
$vgpr0 = COPY %4
...
---
name: trunc_s32_merge_s64_s32
body: |
bb.0:
; Test that trunc(merge) with trunc-size == merge-source-size is eliminated
; CHECK-LABEL: name: trunc_s32_merge_s64_s32
; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
; CHECK: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
; CHECK: $vgpr0 = COPY [[C]](s32)
%0:_(s32) = G_CONSTANT i32 0
%1:_(s32) = G_CONSTANT i32 1
%2:_(s64) = G_MERGE_VALUES %0, %1
%3:_(s32) = G_TRUNC %2
$vgpr0 = COPY %3
...
---
name: trunc_s64_merge_s128_s32
body: |
bb.0:
; Test that trunc(merge) with trunc-size > merge-source-size combines to a
; smaller merge
; CHECK-LABEL: name: trunc_s64_merge_s128_s32
; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
; CHECK: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
; CHECK: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[C]](s32), [[C1]](s32)
; CHECK: $vgpr0_vgpr1 = COPY [[MV]](s64)
%0:_(s32) = G_CONSTANT i32 0
%1:_(s32) = G_CONSTANT i32 1
%2:_(s128) = G_MERGE_VALUES %0, %1, %0, %1
%3:_(s64) = G_TRUNC %2
$vgpr0_vgpr1 = COPY %3
...
---
name: trunc_s32_merge_s128_p0
body: |
bb.0:
; Test that trunc(merge) with a non-scalar merge source is not combined
; CHECK-LABEL: name: trunc_s32_merge_s128_p0
; CHECK: [[C:%[0-9]+]]:_(p0) = G_CONSTANT i64 0
; CHECK: [[C1:%[0-9]+]]:_(p0) = G_CONSTANT i64 1
; CHECK: [[MV:%[0-9]+]]:_(s128) = G_MERGE_VALUES [[C]](p0), [[C1]](p0)
; CHECK: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[MV]](s128)
; CHECK: $vgpr0 = COPY [[TRUNC]](s32)
%0:_(p0) = G_CONSTANT i64 0
%1:_(p0) = G_CONSTANT i64 1
%2:_(s128) = G_MERGE_VALUES %0, %1
%3:_(s32) = G_TRUNC %2
$vgpr0 = COPY %3
...
---
name: trunc_s64_merge_s128_p0
body: |
bb.0:
; Test that trunc(merge) with a non-scalar merge source is not combined
; CHECK-LABEL: name: trunc_s64_merge_s128_p0
; CHECK: [[C:%[0-9]+]]:_(p0) = G_CONSTANT i64 0
; CHECK: [[C1:%[0-9]+]]:_(p0) = G_CONSTANT i64 1
; CHECK: [[MV:%[0-9]+]]:_(s128) = G_MERGE_VALUES [[C]](p0), [[C1]](p0)
; CHECK: [[TRUNC:%[0-9]+]]:_(s64) = G_TRUNC [[MV]](s128)
; CHECK: $vgpr0_vgpr1 = COPY [[TRUNC]](s64)
%0:_(p0) = G_CONSTANT i64 0
%1:_(p0) = G_CONSTANT i64 1
%2:_(s128) = G_MERGE_VALUES %0, %1
%3:_(s64) = G_TRUNC %2
$vgpr0_vgpr1 = COPY %3
...
---
name: trunc_s128_merge_s192_p0
body: |
bb.0:
; Test that trunc(merge) with a non-scalar merge source is not combined
; CHECK-LABEL: name: trunc_s128_merge_s192_p0
; CHECK: [[C:%[0-9]+]]:_(p0) = G_CONSTANT i64 0
; CHECK: [[C1:%[0-9]+]]:_(p0) = G_CONSTANT i64 1
; CHECK: [[MV:%[0-9]+]]:_(s192) = G_MERGE_VALUES [[C]](p0), [[C1]](p0), [[C]](p0)
; CHECK: [[TRUNC:%[0-9]+]]:_(s128) = G_TRUNC [[MV]](s192)
; CHECK: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[TRUNC]](s128)
%0:_(p0) = G_CONSTANT i64 0
%1:_(p0) = G_CONSTANT i64 1
%2:_(s192) = G_MERGE_VALUES %0, %1, %0
%3:_(s128) = G_TRUNC %2
$vgpr0_vgpr1_vgpr2_vgpr3 = COPY %3
...
---
name: trunc_s68_merge_s128_s32
body: |
bb.0:
; Test that trunc(merge) with trunc-size > merge-source-size is not combined
; if trunc-size % merge-source-size != 0
; CHECK-LABEL: name: trunc_s68_merge_s128_s32
; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
; CHECK: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
; CHECK: [[MV:%[0-9]+]]:_(s128) = G_MERGE_VALUES [[C]](s32), [[C1]](s32), [[C]](s32), [[C1]](s32)
; CHECK: [[TRUNC:%[0-9]+]]:_(s68) = G_TRUNC [[MV]](s128)
; CHECK: [[TRUNC1:%[0-9]+]]:_(s32) = G_TRUNC [[TRUNC]](s68)
; CHECK: $vgpr0 = COPY [[TRUNC1]](s32)
%0:_(s32) = G_CONSTANT i32 0
%1:_(s32) = G_CONSTANT i32 1
%2:_(s128) = G_MERGE_VALUES %0, %1, %0, %1
%3:_(s68) = G_TRUNC %2
%4:_(s32) = G_TRUNC %3
$vgpr0 = COPY %4
...

@@ -292,19 +292,17 @@ body: |
; CHECK-LABEL: name: test_bitcast_s24_to_v3s8
; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
; CHECK: [[DEF:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF
; CHECK: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY]](s32), [[DEF]](s32)
; CHECK: [[DEF1:%[0-9]+]]:_(s64) = G_IMPLICIT_DEF
; CHECK: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[MV]](s64)
; CHECK: [[TRUNC1:%[0-9]+]]:_(s32) = G_TRUNC [[DEF1]](s64)
; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY [[TRUNC1]](s32)
; CHECK: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[TRUNC]](s32)
; CHECK: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[DEF1]](s64)
; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY [[TRUNC]](s32)
; CHECK: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY]](s32)
; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; CHECK: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[TRUNC]], [[C]](s32)
; CHECK: [[TRUNC3:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR]](s32)
; CHECK: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[COPY]], [[C]](s32)
; CHECK: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR]](s32)
; CHECK: [[C1:%[0-9]+]]:_(s16) = G_CONSTANT i16 8
; CHECK: [[LSHR1:%[0-9]+]]:_(s16) = G_LSHR [[TRUNC2]], [[C1]](s16)
; CHECK: [[LSHR2:%[0-9]+]]:_(s16) = G_LSHR [[TRUNC3]], [[C1]](s16)
; CHECK: [[COPY2:%[0-9]+]]:_(s32) = COPY [[TRUNC]](s32)
; CHECK: [[LSHR1:%[0-9]+]]:_(s16) = G_LSHR [[TRUNC1]], [[C1]](s16)
; CHECK: [[LSHR2:%[0-9]+]]:_(s16) = G_LSHR [[TRUNC2]], [[C1]](s16)
; CHECK: [[COPY2:%[0-9]+]]:_(s32) = COPY [[COPY]](s32)
; CHECK: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[LSHR1]](s16)
; CHECK: [[COPY3:%[0-9]+]]:_(s32) = COPY [[LSHR]](s32)
; CHECK: [[COPY4:%[0-9]+]]:_(s32) = COPY [[COPY2]](s32)
@@ -328,23 +326,21 @@ body: |
; CHECK-LABEL: name: test_bitcast_s48_to_v3s16
; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY $vgpr0_vgpr1
; CHECK: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY]](s64)
; CHECK: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[UV]](s32), [[UV1]](s32)
; CHECK: [[DEF:%[0-9]+]]:_(s64) = G_IMPLICIT_DEF
; CHECK: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[MV]](s64)
; CHECK: [[TRUNC1:%[0-9]+]]:_(s32) = G_TRUNC [[DEF]](s64)
; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY [[TRUNC1]](s32)
; CHECK: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[DEF]](s64)
; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY [[TRUNC]](s32)
; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; CHECK: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[TRUNC]], [[C]](s32)
; CHECK: [[LSHR1:%[0-9]+]]:_(s32) = G_LSHR [[TRUNC1]], [[C]](s32)
; CHECK: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[UV]], [[C]](s32)
; CHECK: [[LSHR1:%[0-9]+]]:_(s32) = G_LSHR [[TRUNC]], [[C]](s32)
; CHECK: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535
; CHECK: [[COPY2:%[0-9]+]]:_(s32) = COPY [[TRUNC]](s32)
; CHECK: [[COPY2:%[0-9]+]]:_(s32) = COPY [[UV]](s32)
; CHECK: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY2]], [[C1]]
; CHECK: [[COPY3:%[0-9]+]]:_(s32) = COPY [[LSHR]](s32)
; CHECK: [[AND1:%[0-9]+]]:_(s32) = G_AND [[COPY3]], [[C1]]
; CHECK: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[AND1]], [[C]](s32)
; CHECK: [[OR:%[0-9]+]]:_(s32) = G_OR [[AND]], [[SHL]]
; CHECK: [[BITCAST:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[OR]](s32)
; CHECK: [[COPY4:%[0-9]+]]:_(s32) = COPY [[TRUNC1]](s32)
; CHECK: [[COPY4:%[0-9]+]]:_(s32) = COPY [[TRUNC]](s32)
; CHECK: [[AND2:%[0-9]+]]:_(s32) = G_AND [[COPY4]], [[C1]]
; CHECK: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
; CHECK: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[C2]], [[C]](s32)

@@ -207,13 +207,8 @@ body: |
; CHECK: [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[C1]](s64)
; CHECK: [[USUBO:%[0-9]+]]:_(s32), [[USUBO1:%[0-9]+]]:_(s1) = G_USUBO [[UV]], [[UV2]]
; CHECK: [[USUBE:%[0-9]+]]:_(s32), [[USUBE1:%[0-9]+]]:_(s1) = G_USUBE [[UV1]], [[UV3]], [[USUBO1]]
; CHECK: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[USUBO]](s32), [[USUBE]](s32)
; CHECK: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 4294967295
; CHECK: [[COPY2:%[0-9]+]]:_(s64) = COPY [[MV]](s64)
; CHECK: [[COPY3:%[0-9]+]]:_(s64) = COPY [[C2]](s64)
; CHECK: [[AND1:%[0-9]+]]:_(s64) = G_AND [[COPY2]], [[COPY3]]
; CHECK: [[COPY4:%[0-9]+]]:_(s64) = COPY [[AND1]](s64)
; CHECK: $vgpr0_vgpr1 = COPY [[COPY4]](s64)
; CHECK: [[ZEXT1:%[0-9]+]]:_(s64) = G_ZEXT [[USUBO]](s32)
; CHECK: $vgpr0_vgpr1 = COPY [[ZEXT1]](s64)
%0:_(s64) = COPY $vgpr0_vgpr1
%1:_(s33) = G_TRUNC %0
%2:_(s33) = G_CTLZ_ZERO_UNDEF %1

@@ -253,13 +253,8 @@ body: |
; CHECK: [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[C3]](s64)
; CHECK: [[USUBO:%[0-9]+]]:_(s32), [[USUBO1:%[0-9]+]]:_(s1) = G_USUBO [[UV]], [[UV2]]
; CHECK: [[USUBE:%[0-9]+]]:_(s32), [[USUBE1:%[0-9]+]]:_(s1) = G_USUBE [[UV1]], [[UV3]], [[USUBO1]]
; CHECK: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[USUBO]](s32), [[USUBE]](s32)
; CHECK: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 4294967295
; CHECK: [[COPY2:%[0-9]+]]:_(s64) = COPY [[MV]](s64)
; CHECK: [[COPY3:%[0-9]+]]:_(s64) = COPY [[C4]](s64)
; CHECK: [[AND1:%[0-9]+]]:_(s64) = G_AND [[COPY2]], [[COPY3]]
; CHECK: [[COPY4:%[0-9]+]]:_(s64) = COPY [[AND1]](s64)
; CHECK: $vgpr0_vgpr1 = COPY [[COPY4]](s64)
; CHECK: [[ZEXT1:%[0-9]+]]:_(s64) = G_ZEXT [[USUBO]](s32)
; CHECK: $vgpr0_vgpr1 = COPY [[ZEXT1]](s64)
%0:_(s64) = COPY $vgpr0_vgpr1
%1:_(s33) = G_TRUNC %0
%2:_(s33) = G_CTLZ %1

File diff suppressed because it is too large.

@@ -233,31 +233,29 @@ body: |
; CHECK-LABEL: name: test_unmerge_s8_s48
; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY $vgpr0_vgpr1
; CHECK: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY]](s64)
; CHECK: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[UV]](s32), [[UV1]](s32)
; CHECK: [[DEF:%[0-9]+]]:_(s64) = G_IMPLICIT_DEF
; CHECK: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[MV]](s64)
; CHECK: [[TRUNC1:%[0-9]+]]:_(s32) = G_TRUNC [[DEF]](s64)
; CHECK: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[DEF]](s64)
; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; CHECK: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[TRUNC]], [[C]](s32)
; CHECK: [[LSHR1:%[0-9]+]]:_(s32) = G_LSHR [[TRUNC1]], [[C]](s32)
; CHECK: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[UV]], [[C]](s32)
; CHECK: [[LSHR1:%[0-9]+]]:_(s32) = G_LSHR [[TRUNC]], [[C]](s32)
; CHECK: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY [[C1]](s32)
; CHECK: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535
; CHECK: [[COPY2:%[0-9]+]]:_(s32) = COPY [[TRUNC]](s32)
; CHECK: [[COPY2:%[0-9]+]]:_(s32) = COPY [[UV]](s32)
; CHECK: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY2]], [[C2]]
; CHECK: [[LSHR2:%[0-9]+]]:_(s32) = G_LSHR [[AND]], [[COPY1]](s32)
; CHECK: [[COPY3:%[0-9]+]]:_(s32) = COPY [[C1]](s32)
; CHECK: [[COPY4:%[0-9]+]]:_(s32) = COPY [[LSHR]](s32)
; CHECK: [[AND1:%[0-9]+]]:_(s32) = G_AND [[COPY4]], [[C2]]
; CHECK: [[LSHR3:%[0-9]+]]:_(s32) = G_LSHR [[AND1]], [[COPY3]](s32)
; CHECK: [[COPY5:%[0-9]+]]:_(s32) = COPY [[TRUNC1]](s32)
; CHECK: [[COPY5:%[0-9]+]]:_(s32) = COPY [[TRUNC]](s32)
; CHECK: [[AND2:%[0-9]+]]:_(s32) = G_AND [[COPY5]], [[C2]]
; CHECK: [[LSHR4:%[0-9]+]]:_(s32) = G_LSHR [[AND2]], [[C1]](s32)
; CHECK: [[COPY6:%[0-9]+]]:_(s32) = COPY [[TRUNC]](s32)
; CHECK: [[COPY6:%[0-9]+]]:_(s32) = COPY [[UV]](s32)
; CHECK: [[COPY7:%[0-9]+]]:_(s32) = COPY [[LSHR2]](s32)
; CHECK: [[COPY8:%[0-9]+]]:_(s32) = COPY [[LSHR]](s32)
; CHECK: [[COPY9:%[0-9]+]]:_(s32) = COPY [[LSHR3]](s32)
; CHECK: [[COPY10:%[0-9]+]]:_(s32) = COPY [[TRUNC1]](s32)
; CHECK: [[COPY10:%[0-9]+]]:_(s32) = COPY [[TRUNC]](s32)
; CHECK: [[COPY11:%[0-9]+]]:_(s32) = COPY [[LSHR4]](s32)
; CHECK: $vgpr0 = COPY [[COPY6]](s32)
; CHECK: $vgpr1 = COPY [[COPY7]](s32)
@@ -290,16 +288,14 @@ body: |
; CHECK-LABEL: name: test_unmerge_s16_s48
; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY $vgpr0_vgpr1
; CHECK: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY]](s64)
; CHECK: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[UV]](s32), [[UV1]](s32)
; CHECK: [[DEF:%[0-9]+]]:_(s64) = G_IMPLICIT_DEF
; CHECK: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[MV]](s64)
; CHECK: [[TRUNC1:%[0-9]+]]:_(s32) = G_TRUNC [[DEF]](s64)
; CHECK: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[DEF]](s64)
; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; CHECK: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[TRUNC]], [[C]](s32)
; CHECK: [[LSHR1:%[0-9]+]]:_(s32) = G_LSHR [[TRUNC1]], [[C]](s32)
; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY [[TRUNC]](s32)
; CHECK: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[UV]], [[C]](s32)
; CHECK: [[LSHR1:%[0-9]+]]:_(s32) = G_LSHR [[TRUNC]], [[C]](s32)
; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY [[UV]](s32)
; CHECK: [[COPY2:%[0-9]+]]:_(s32) = COPY [[LSHR]](s32)
; CHECK: [[COPY3:%[0-9]+]]:_(s32) = COPY [[TRUNC1]](s32)
; CHECK: [[COPY3:%[0-9]+]]:_(s32) = COPY [[TRUNC]](s32)
; CHECK: $vgpr0 = COPY [[COPY1]](s32)
; CHECK: $vgpr1 = COPY [[COPY2]](s32)
; CHECK: $vgpr2 = COPY [[COPY3]](s32)

@@ -692,25 +692,23 @@ body: |
; CHECK: [[OR1:%[0-9]+]]:_(s32) = G_OR [[C4]], [[SHL1]]
; CHECK: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[OR]](s32), [[OR1]](s32)
; CHECK: [[DEF1:%[0-9]+]]:_(s64) = G_IMPLICIT_DEF
; CHECK: [[MV1:%[0-9]+]]:_(s448) = G_MERGE_VALUES [[MV]](s64), [[DEF1]](s64), [[DEF1]](s64), [[DEF1]](s64), [[DEF1]](s64), [[DEF1]](s64), [[DEF1]](s64)
; CHECK: [[TRUNC1:%[0-9]+]]:_(s128) = G_TRUNC [[MV1]](s448)
; CHECK: [[EXTRACT:%[0-9]+]]:_(s64) = G_EXTRACT [[TRUNC1]](s128), 0
; CHECK: [[EXTRACT1:%[0-9]+]]:_(s48) = G_EXTRACT [[TRUNC1]](s128), 64
; CHECK: [[COPY5:%[0-9]+]]:_(s128) = COPY [[INSERT1]](s128)
; CHECK: [[EXTRACT2:%[0-9]+]]:_(s64) = G_EXTRACT [[COPY5]](s128), 0
; CHECK: [[EXTRACT3:%[0-9]+]]:_(s48) = G_EXTRACT [[COPY5]](s128), 64
; CHECK: [[AND2:%[0-9]+]]:_(s64) = G_AND [[EXTRACT]], [[EXTRACT2]]
; CHECK: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[EXTRACT1]](s48)
; CHECK: [[ANYEXT1:%[0-9]+]]:_(s64) = G_ANYEXT [[EXTRACT3]](s48)
; CHECK: [[COPY5:%[0-9]+]]:_(s64) = COPY [[MV]](s64)
; CHECK: [[EXTRACT:%[0-9]+]]:_(s48) = G_EXTRACT [[DEF1]](s64), 0
; CHECK: [[COPY6:%[0-9]+]]:_(s128) = COPY [[INSERT1]](s128)
; CHECK: [[EXTRACT1:%[0-9]+]]:_(s64) = G_EXTRACT [[COPY6]](s128), 0
; CHECK: [[EXTRACT2:%[0-9]+]]:_(s48) = G_EXTRACT [[COPY6]](s128), 64
; CHECK: [[AND2:%[0-9]+]]:_(s64) = G_AND [[COPY5]], [[EXTRACT1]]
; CHECK: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[EXTRACT]](s48)
; CHECK: [[ANYEXT1:%[0-9]+]]:_(s64) = G_ANYEXT [[EXTRACT2]](s48)
; CHECK: [[AND3:%[0-9]+]]:_(s64) = G_AND [[ANYEXT]], [[ANYEXT1]]
; CHECK: [[TRUNC2:%[0-9]+]]:_(s48) = G_TRUNC [[AND3]](s64)
; CHECK: [[TRUNC1:%[0-9]+]]:_(s48) = G_TRUNC [[AND3]](s64)
; CHECK: [[DEF2:%[0-9]+]]:_(s128) = G_IMPLICIT_DEF
; CHECK: [[COPY6:%[0-9]+]]:_(s128) = COPY [[DEF2]](s128)
; CHECK: [[INSERT2:%[0-9]+]]:_(s128) = G_INSERT [[COPY6]], [[AND2]](s64), 0
; CHECK: [[COPY7:%[0-9]+]]:_(s128) = COPY [[INSERT2]](s128)
; CHECK: [[INSERT3:%[0-9]+]]:_(s128) = G_INSERT [[COPY7]], [[TRUNC2]](s48), 64
; CHECK: [[TRUNC3:%[0-9]+]]:_(s112) = G_TRUNC [[INSERT3]](s128)
; CHECK: S_ENDPGM 0, implicit [[TRUNC3]](s112)
; CHECK: [[COPY7:%[0-9]+]]:_(s128) = COPY [[DEF2]](s128)
; CHECK: [[INSERT2:%[0-9]+]]:_(s128) = G_INSERT [[COPY7]], [[AND2]](s64), 0
; CHECK: [[COPY8:%[0-9]+]]:_(s128) = COPY [[INSERT2]](s128)
; CHECK: [[INSERT3:%[0-9]+]]:_(s128) = G_INSERT [[COPY8]], [[TRUNC1]](s48), 64
; CHECK: [[TRUNC2:%[0-9]+]]:_(s112) = G_TRUNC [[INSERT3]](s128)
; CHECK: S_ENDPGM 0, implicit [[TRUNC2]](s112)
%0:_(s32) = COPY $vgpr0
%1:_(s2) = G_TRUNC %0
%2:_(s112) = G_ZEXT %1

@@ -525,8 +525,7 @@ body: |
; MIPS32: [[COPY:%[0-9]+]]:_(s32) = COPY $a0
; MIPS32: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
; MIPS32: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
; MIPS32: [[COPY1:%[0-9]+]]:_(s32) = COPY [[C]](s32)
; MIPS32: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[COPY]], [[COPY1]](s32)
; MIPS32: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[COPY]], [[C]](s32)
; MIPS32: $v0 = COPY [[LSHR]](s32)
; MIPS32: RetRA implicit $v0
%0:_(s32) = COPY $a0
@@ -707,27 +706,26 @@ body: |
; MIPS32: [[COPY1:%[0-9]+]]:_(s32) = COPY $a1
; MIPS32: [[COPY2:%[0-9]+]]:_(s32) = COPY $a2
; MIPS32: [[COPY3:%[0-9]+]]:_(s32) = COPY $a3
; MIPS32: [[COPY4:%[0-9]+]]:_(s32) = COPY [[COPY2]](s32)
; MIPS32: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 32
; MIPS32: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[COPY4]], [[C]]
; MIPS32: [[SUB1:%[0-9]+]]:_(s32) = G_SUB [[C]], [[COPY4]]
; MIPS32: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[COPY2]], [[C]]
; MIPS32: [[SUB1:%[0-9]+]]:_(s32) = G_SUB [[C]], [[COPY2]]
; MIPS32: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
; MIPS32: [[ICMP:%[0-9]+]]:_(s32) = G_ICMP intpred(ult), [[COPY4]](s32), [[C]]
; MIPS32: [[ICMP1:%[0-9]+]]:_(s32) = G_ICMP intpred(eq), [[COPY4]](s32), [[C1]]
; MIPS32: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[COPY]], [[COPY4]](s32)
; MIPS32: [[ICMP:%[0-9]+]]:_(s32) = G_ICMP intpred(ult), [[COPY2]](s32), [[C]]
; MIPS32: [[ICMP1:%[0-9]+]]:_(s32) = G_ICMP intpred(eq), [[COPY2]](s32), [[C1]]
; MIPS32: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[COPY]], [[COPY2]](s32)
; MIPS32: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[COPY]], [[SUB1]](s32)
; MIPS32: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[COPY1]], [[COPY4]](s32)
; MIPS32: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[COPY1]], [[COPY2]](s32)
; MIPS32: [[OR:%[0-9]+]]:_(s32) = G_OR [[LSHR]], [[SHL1]]
; MIPS32: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[COPY]], [[SUB]](s32)
; MIPS32: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
; MIPS32: [[COPY5:%[0-9]+]]:_(s32) = COPY [[ICMP]](s32)
; MIPS32: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY5]], [[C2]]
; MIPS32: [[COPY4:%[0-9]+]]:_(s32) = COPY [[ICMP]](s32)
; MIPS32: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY4]], [[C2]]
; MIPS32: [[SELECT:%[0-9]+]]:_(s32) = G_SELECT [[AND]](s32), [[SHL]], [[C1]]
; MIPS32: [[COPY6:%[0-9]+]]:_(s32) = COPY [[ICMP]](s32)
; MIPS32: [[AND1:%[0-9]+]]:_(s32) = G_AND [[COPY6]], [[C2]]
; MIPS32: [[COPY5:%[0-9]+]]:_(s32) = COPY [[ICMP]](s32)
; MIPS32: [[AND1:%[0-9]+]]:_(s32) = G_AND [[COPY5]], [[C2]]
; MIPS32: [[SELECT1:%[0-9]+]]:_(s32) = G_SELECT [[AND1]](s32), [[OR]], [[SHL2]]
; MIPS32: [[COPY7:%[0-9]+]]:_(s32) = COPY [[ICMP1]](s32)
; MIPS32: [[AND2:%[0-9]+]]:_(s32) = G_AND [[COPY7]], [[C2]]
; MIPS32: [[COPY6:%[0-9]+]]:_(s32) = COPY [[ICMP1]](s32)
; MIPS32: [[AND2:%[0-9]+]]:_(s32) = G_AND [[COPY6]], [[C2]]
; MIPS32: [[SELECT2:%[0-9]+]]:_(s32) = G_SELECT [[AND2]](s32), [[COPY1]], [[SELECT1]]
; MIPS32: $v0 = COPY [[SELECT]](s32)
; MIPS32: $v1 = COPY [[SELECT2]](s32)
@@ -759,29 +757,28 @@ body: |
; MIPS32: [[COPY1:%[0-9]+]]:_(s32) = COPY $a1
; MIPS32: [[COPY2:%[0-9]+]]:_(s32) = COPY $a2
; MIPS32: [[COPY3:%[0-9]+]]:_(s32) = COPY $a3
; MIPS32: [[COPY4:%[0-9]+]]:_(s32) = COPY [[COPY2]](s32)
; MIPS32: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 32
; MIPS32: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[COPY4]], [[C]]
; MIPS32: [[SUB1:%[0-9]+]]:_(s32) = G_SUB [[C]], [[COPY4]]
; MIPS32: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[COPY2]], [[C]]
; MIPS32: [[SUB1:%[0-9]+]]:_(s32) = G_SUB [[C]], [[COPY2]]
; MIPS32: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
; MIPS32: [[ICMP:%[0-9]+]]:_(s32) = G_ICMP intpred(ult), [[COPY4]](s32), [[C]]
; MIPS32: [[ICMP1:%[0-9]+]]:_(s32) = G_ICMP intpred(eq), [[COPY4]](s32), [[C1]]
; MIPS32: [[ASHR:%[0-9]+]]:_(s32) = G_ASHR [[COPY1]], [[COPY4]](s32)
; MIPS32: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[COPY]], [[COPY4]](s32)
; MIPS32: [[ICMP:%[0-9]+]]:_(s32) = G_ICMP intpred(ult), [[COPY2]](s32), [[C]]
; MIPS32: [[ICMP1:%[0-9]+]]:_(s32) = G_ICMP intpred(eq), [[COPY2]](s32), [[C1]]
; MIPS32: [[ASHR:%[0-9]+]]:_(s32) = G_ASHR [[COPY1]], [[COPY2]](s32)
; MIPS32: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[COPY]], [[COPY2]](s32)
; MIPS32: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[COPY1]], [[SUB1]](s32)
; MIPS32: [[OR:%[0-9]+]]:_(s32) = G_OR [[LSHR]], [[SHL]]
; MIPS32: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 31
; MIPS32: [[ASHR1:%[0-9]+]]:_(s32) = G_ASHR [[COPY1]], [[C2]](s32)
; MIPS32: [[ASHR2:%[0-9]+]]:_(s32) = G_ASHR [[COPY1]], [[SUB]](s32)
; MIPS32: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
; MIPS32: [[COPY5:%[0-9]+]]:_(s32) = COPY [[ICMP]](s32)
; MIPS32: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY5]], [[C3]]
; MIPS32: [[COPY4:%[0-9]+]]:_(s32) = COPY [[ICMP]](s32)
; MIPS32: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY4]], [[C3]]
; MIPS32: [[SELECT:%[0-9]+]]:_(s32) = G_SELECT [[AND]](s32), [[OR]], [[ASHR2]]
; MIPS32: [[COPY6:%[0-9]+]]:_(s32) = COPY [[ICMP1]](s32)
; MIPS32: [[AND1:%[0-9]+]]:_(s32) = G_AND [[COPY6]], [[C3]]
; MIPS32: [[COPY5:%[0-9]+]]:_(s32) = COPY [[ICMP1]](s32)
; MIPS32: [[AND1:%[0-9]+]]:_(s32) = G_AND [[COPY5]], [[C3]]
; MIPS32: [[SELECT1:%[0-9]+]]:_(s32) = G_SELECT [[AND1]](s32), [[COPY]], [[SELECT]]
; MIPS32: [[COPY7:%[0-9]+]]:_(s32) = COPY [[ICMP]](s32)
; MIPS32: [[AND2:%[0-9]+]]:_(s32) = G_AND [[COPY7]], [[C3]]
; MIPS32: [[COPY6:%[0-9]+]]:_(s32) = COPY [[ICMP]](s32)
; MIPS32: [[AND2:%[0-9]+]]:_(s32) = G_AND [[COPY6]], [[C3]]
; MIPS32: [[SELECT2:%[0-9]+]]:_(s32) = G_SELECT [[AND2]](s32), [[ASHR]], [[ASHR1]]
; MIPS32: $v0 = COPY [[SELECT1]](s32)
; MIPS32: $v1 = COPY [[SELECT2]](s32)
@@ -813,27 +810,26 @@ body: |
; MIPS32: [[COPY1:%[0-9]+]]:_(s32) = COPY $a1
; MIPS32: [[COPY2:%[0-9]+]]:_(s32) = COPY $a2
; MIPS32: [[COPY3:%[0-9]+]]:_(s32) = COPY $a3
; MIPS32: [[COPY4:%[0-9]+]]:_(s32) = COPY [[COPY2]](s32)
; MIPS32: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 32
; MIPS32: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[COPY4]], [[C]]
; MIPS32: [[SUB1:%[0-9]+]]:_(s32) = G_SUB [[C]], [[COPY4]]
; MIPS32: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[COPY2]], [[C]]
; MIPS32: [[SUB1:%[0-9]+]]:_(s32) = G_SUB [[C]], [[COPY2]]
; MIPS32: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
; MIPS32: [[ICMP:%[0-9]+]]:_(s32) = G_ICMP intpred(ult), [[COPY4]](s32), [[C]]
; MIPS32: [[ICMP1:%[0-9]+]]:_(s32) = G_ICMP intpred(eq), [[COPY4]](s32), [[C1]]
; MIPS32: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[COPY1]], [[COPY4]](s32)
; MIPS32: [[LSHR1:%[0-9]+]]:_(s32) = G_LSHR [[COPY]], [[COPY4]](s32)
; MIPS32: [[ICMP:%[0-9]+]]:_(s32) = G_ICMP intpred(ult), [[COPY2]](s32), [[C]]
; MIPS32: [[ICMP1:%[0-9]+]]:_(s32) = G_ICMP intpred(eq), [[COPY2]](s32), [[C1]]
; MIPS32: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[COPY1]], [[COPY2]](s32)
; MIPS32: [[LSHR1:%[0-9]+]]:_(s32) = G_LSHR [[COPY]], [[COPY2]](s32)
; MIPS32: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[COPY1]], [[SUB1]](s32)
; MIPS32: [[OR:%[0-9]+]]:_(s32) = G_OR [[LSHR1]], [[SHL]]
; MIPS32: [[LSHR2:%[0-9]+]]:_(s32) = G_LSHR [[COPY1]], [[SUB]](s32)
; MIPS32: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
; MIPS32: [[COPY5:%[0-9]+]]:_(s32) = COPY [[ICMP]](s32)
; MIPS32: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY5]], [[C2]]
; MIPS32: [[COPY4:%[0-9]+]]:_(s32) = COPY [[ICMP]](s32)
; MIPS32: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY4]], [[C2]]
; MIPS32: [[SELECT:%[0-9]+]]:_(s32) = G_SELECT [[AND]](s32), [[OR]], [[LSHR2]]
; MIPS32: [[COPY6:%[0-9]+]]:_(s32) = COPY [[ICMP1]](s32)
; MIPS32: [[AND1:%[0-9]+]]:_(s32) = G_AND [[COPY6]], [[C2]]
; MIPS32: [[COPY5:%[0-9]+]]:_(s32) = COPY [[ICMP1]](s32)
; MIPS32: [[AND1:%[0-9]+]]:_(s32) = G_AND [[COPY5]], [[C2]]
; MIPS32: [[SELECT1:%[0-9]+]]:_(s32) = G_SELECT [[AND1]](s32), [[COPY]], [[SELECT]]
; MIPS32: [[COPY7:%[0-9]+]]:_(s32) = COPY [[ICMP]](s32)
; MIPS32: [[AND2:%[0-9]+]]:_(s32) = G_AND [[COPY7]], [[C2]]
; MIPS32: [[COPY6:%[0-9]+]]:_(s32) = COPY [[ICMP]](s32)
; MIPS32: [[AND2:%[0-9]+]]:_(s32) = G_AND [[COPY6]], [[C2]]
; MIPS32: [[SELECT2:%[0-9]+]]:_(s32) = G_SELECT [[AND2]](s32), [[LSHR]], [[C1]]
; MIPS32: $v0 = COPY [[SELECT1]](s32)
; MIPS32: $v1 = COPY [[SELECT2]](s32)

@@ -17,8 +17,7 @@ body: |
; MIPS32: liveins: $a0, $a1
; MIPS32: [[COPY:%[0-9]+]]:_(s32) = COPY $a0
; MIPS32: [[COPY1:%[0-9]+]]:_(s32) = COPY $a1
; MIPS32: [[COPY2:%[0-9]+]]:_(s32) = COPY [[COPY]](s32)
; MIPS32: $v0 = COPY [[COPY2]](s32)
; MIPS32: $v0 = COPY [[COPY]](s32)
; MIPS32: RetRA implicit $v0
%1:_(s32) = COPY $a0
%2:_(s32) = COPY $a1

@@ -218,8 +218,7 @@ body: |
; MIPS32: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (load 4 from %ir.px)
; MIPS32: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 31
; MIPS32: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
; MIPS32: [[COPY1:%[0-9]+]]:_(s32) = COPY [[C]](s32)
; MIPS32: [[ASHR:%[0-9]+]]:_(s32) = G_ASHR [[LOAD]], [[COPY1]](s32)
; MIPS32: [[ASHR:%[0-9]+]]:_(s32) = G_ASHR [[LOAD]], [[C]](s32)
; MIPS32: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[LOAD]](s32), [[ASHR]](s32)
; MIPS32: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[MV]](s64)
; MIPS32: $v0 = COPY [[UV]](s32)

@@ -44,8 +44,7 @@ body: |
; MIPS32: [[COPY:%[0-9]+]]:_(s32) = COPY $a0
; MIPS32: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 31
; MIPS32: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
; MIPS32: [[COPY1:%[0-9]+]]:_(s32) = COPY [[C]](s32)
; MIPS32: [[ASHR:%[0-9]+]]:_(s32) = G_ASHR [[COPY]], [[COPY1]](s32)
; MIPS32: [[ASHR:%[0-9]+]]:_(s32) = G_ASHR [[COPY]], [[C]](s32)
; MIPS32: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY]](s32), [[ASHR]](s32)
; MIPS32: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[MV]](s64)
; MIPS32: $v0 = COPY [[UV]](s32)

@@ -114,8 +114,7 @@ define i64 @load4_s32_to_sextLoad4_s64(i32* %px) {
; MIPS32-LABEL: load4_s32_to_sextLoad4_s64:
; MIPS32: # %bb.0: # %entry
; MIPS32-NEXT: lw $1, 0($4)
; MIPS32-NEXT: ori $2, $zero, 31
; MIPS32-NEXT: srav $3, $1, $2
; MIPS32-NEXT: sra $3, $1, 31
; MIPS32-NEXT: move $2, $1
; MIPS32-NEXT: jr $ra
; MIPS32-NEXT: nop

@@ -16,8 +16,7 @@ entry:
define i64 @sext(i32 %x) {
; MIPS32-LABEL: sext:
; MIPS32: # %bb.0: # %entry
; MIPS32-NEXT: ori $1, $zero, 31
; MIPS32-NEXT: srav $3, $4, $1
; MIPS32-NEXT: sra $3, $4, 31
; MIPS32-NEXT: move $2, $4
; MIPS32-NEXT: jr $ra
; MIPS32-NEXT: nop