; RUN: llc < %s -mtriple=x86_64-apple-darwin10 | FileCheck %s
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128"

define i32 @test1(i32 %x, i32 %n, i32 %w, i32* %vp) nounwind readnone {
entry:
; CHECK: test1:
; CHECK: btl
; CHECK-NEXT: movl $12, %eax
; CHECK-NEXT: cmovael (%rcx), %eax
; CHECK-NEXT: ret
  %0 = lshr i32 %x, %n            ; <i32> [#uses=1]
  %1 = and i32 %0, 1              ; <i32> [#uses=1]
  %toBool = icmp eq i32 %1, 0     ; <i1> [#uses=1]
  %v = load i32* %vp
  %.0 = select i1 %toBool, i32 %v, i32 12 ; <i32> [#uses=1]
  ret i32 %.0
}

define i32 @test2(i32 %x, i32 %n, i32 %w, i32* %vp) nounwind readnone {
entry:
; CHECK: test2:
; CHECK: btl
; CHECK-NEXT: movl $12, %eax
; CHECK-NEXT: cmovbl (%rcx), %eax
; CHECK-NEXT: ret
  %0 = lshr i32 %x, %n            ; <i32> [#uses=1]
  %1 = and i32 %0, 1              ; <i32> [#uses=1]
  %toBool = icmp eq i32 %1, 0     ; <i1> [#uses=1]
  %v = load i32* %vp
  %.0 = select i1 %toBool, i32 12, i32 %v ; <i32> [#uses=1]
  ret i32 %.0
}

; x86's 32-bit cmov doesn't clobber the high 32 bits of the destination
; if the condition is false. An explicit zero-extend (movl) is needed
; after the cmov.
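;
; For illustration only (the registers simply mirror the CHECK lines below;
; the actual assignment is up to the register allocator):
;   cmovnel %edi, %esi      ; 32-bit conditional select
;   movl    %esi, %edi      ; any 32-bit register write zero-extends into the
;                           ; full 64-bit register, so this movl is the zext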

declare void @bar(i64) nounwind

define void @test3(i64 %a, i64 %b, i1 %p) nounwind {
; CHECK: test3:
; CHECK: cmovnel %edi, %esi
; CHECK-NEXT: movl %esi, %edi
  %c = trunc i64 %a to i32
  %d = trunc i64 %b to i32
  %e = select i1 %p, i32 %c, i32 %d
  %f = zext i32 %e to i64
  call void @bar(i64 %f)
  ret void
}

; CodeGen shouldn't try to do a setne after an expanded 8-bit conditional
; move without recomputing EFLAGS, because the expansion of the conditional
; move with control flow may clobber EFLAGS (e.g., with xor, to set the
; register to zero).

; The test is a little awkward; the important part is that there's a test
; before the setne.

; PR4814
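;
; Sketch for illustration only (hand-written, not output this test checks):
; an expanded 8-bit cmov can become control flow that zeroes a register with
; xor, and xor rewrites EFLAGS, so a later setne needs a fresh test:
;   xorl  %ecx, %ecx        ; materialize 0 -- clobbers EFLAGS
;   testb %al, %al
;   je    .LBB_skip
;   movb  %dl, %cl
; .LBB_skip:
;   testb %cl, %cl          ; flags recomputed here...
;   setne %al               ; ...so the setne sees correct EFLAGS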

@g_3 = external global i8                         ; <i8*> [#uses=1]
@g_96 = external global i8                        ; <i8*> [#uses=2]
@g_100 = external global i8                       ; <i8*> [#uses=2]
@_2E_str = external constant [15 x i8], align 1   ; <[15 x i8]*> [#uses=1]

define i32 @test4() nounwind {
entry:
  %0 = load i8* @g_3, align 1                     ; <i8> [#uses=2]
  %1 = sext i8 %0 to i32                          ; <i32> [#uses=1]
  %.lobit.i = lshr i8 %0, 7                       ; <i8> [#uses=1]
  %tmp.i = zext i8 %.lobit.i to i32               ; <i32> [#uses=1]
  %tmp.not.i = xor i32 %tmp.i, 1                  ; <i32> [#uses=1]
  %iftmp.17.0.i.i = ashr i32 %1, %tmp.not.i       ; <i32> [#uses=1]
  %retval56.i.i = trunc i32 %iftmp.17.0.i.i to i8 ; <i8> [#uses=1]
  %2 = icmp eq i8 %retval56.i.i, 0                ; <i1> [#uses=2]
  %g_96.promoted.i = load i8* @g_96               ; <i8> [#uses=3]
  %3 = icmp eq i8 %g_96.promoted.i, 0             ; <i1> [#uses=2]
  br i1 %3, label %func_4.exit.i, label %bb.i.i.i

bb.i.i.i: ; preds = %entry
  %4 = load volatile i8* @g_100, align 1          ; <i8> [#uses=0]
  br label %func_4.exit.i

; CHECK: test4:
; CHECK: g_100
; CHECK: testb
; CHECK-NOT: xor
; CHECK: setne
; CHECK-NEXT: testb

func_4.exit.i: ; preds = %bb.i.i.i, %entry
  %.not.i = xor i1 %2, true                       ; <i1> [#uses=1]
  %brmerge.i = or i1 %3, %.not.i                  ; <i1> [#uses=1]
  %.mux.i = select i1 %2, i8 %g_96.promoted.i, i8 0 ; <i8> [#uses=1]
  br i1 %brmerge.i, label %func_1.exit, label %bb.i.i

bb.i.i: ; preds = %func_4.exit.i
  %5 = load volatile i8* @g_100, align 1          ; <i8> [#uses=0]
  br label %func_1.exit

func_1.exit: ; preds = %bb.i.i, %func_4.exit.i
  %g_96.tmp.0.i = phi i8 [ %g_96.promoted.i, %bb.i.i ], [ %.mux.i, %func_4.exit.i ] ; <i8> [#uses=2]
  store i8 %g_96.tmp.0.i, i8* @g_96
  %6 = zext i8 %g_96.tmp.0.i to i32               ; <i32> [#uses=1]
  %7 = tail call i32 (i8*, ...)* @printf(i8* noalias getelementptr ([15 x i8]* @_2E_str, i64 0, i64 0), i32 %6) nounwind ; <i32> [#uses=0]
  ret i32 0
}

declare i32 @printf(i8* nocapture, ...) nounwind


; Should compile to setcc | -2.
; rdar://6668608
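;
; Informal arithmetic (added for clarity, not part of the original comment):
; setg yields 1 when %0 > 41 and 0 otherwise; after movzbl, or-ing with -2
; (all ones except bit 0) gives -1 for 1 and -2 for 0, which matches the
; select i1 %1, i32 -1, i32 -2 below.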

define i32 @test5(i32* nocapture %P) nounwind readonly {
entry:
; CHECK: test5:
; CHECK: setg %al
; CHECK: movzbl %al, %eax
; CHECK: orl $-2, %eax
; CHECK: ret
  %0 = load i32* %P, align 4                      ; <i32> [#uses=1]
  %1 = icmp sgt i32 %0, 41                        ; <i1> [#uses=1]
  %iftmp.0.0 = select i1 %1, i32 -1, i32 -2       ; <i32> [#uses=1]
  ret i32 %iftmp.0.0
}
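
; Note added for clarity (inferred from the CHECK lines below): a select
; between two constants needs no branch here; the two values differ by 9,
; so the setcc bit b becomes 4 + 9*b via a single leal 4(%rax,%rax,8).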
define i32 @test6(i32* nocapture %P) nounwind readonly {
entry:
; CHECK: test6:
; CHECK: setl %al
; CHECK: movzbl %al, %eax
; CHECK: leal 4(%rax,%rax,8), %eax
; CHECK: ret
  %0 = load i32* %P, align 4                      ; <i32> [#uses=1]
  %1 = icmp sgt i32 %0, 41                        ; <i1> [#uses=1]
  %iftmp.0.0 = select i1 %1, i32 4, i32 13        ; <i32> [#uses=1]
  ret i32 %iftmp.0.0
}

; Don't try to use a 16-bit conditional move to do an 8-bit select,
; because it isn't worth it. Just use a branch instead.
define i8 @test7(i1 inreg %c, i8 inreg %a, i8 inreg %b) nounwind {
; CHECK: test7:
; CHECK: testb $1, %dil
; CHECK-NEXT: jne LBB
  %d = select i1 %c, i8 %a, i8 %b
  ret i8 %d
}