TableGen: fix operand counting for aliases

TableGen has a fairly dubious heuristic to decide whether an alias should be
printed: does the alias have fewer operands than the real instruction? This is
bad enough (particularly with no way to override it), but it should at least be
calculated consistently for both strings.

This patch implements that logic: first get the *correct* string for the
variant, in the same way the Matcher does rather than by guessing; then count
the number of whitespace characters in it.
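
For illustration, here is a minimal C++ sketch of that counting step, not the
actual TableGen code: the helper name and the operand strings below are made
up, and both strings are assumed to already be flattened for the variant in
question.

```c++
#include <algorithm>
#include <cassert>
#include <string>

// Sketch of the rule: once the asm string has been flattened for the current
// variant, the operand count is just the number of whitespace separators
// (spaces and tabs) in it.
static unsigned countNumOperands(const std::string &AsmString) {
  return std::count(AsmString.begin(), AsmString.end(), ' ') +
         std::count(AsmString.begin(), AsmString.end(), '\t');
}

int main() {
  // Illustrative strings shaped like the cases listed below: the alias
  // "neg $Rd, $Rm" counts 2 separators, while the real instruction
  // "sub\t$Rd, $Rn, $Rm" counts 3, so the alias passes the "no more operands
  // than what it's aliasing" check and gets printed.
  assert(countNumOperands("neg $Rd, $Rm") == 2);
  assert(countNumOperands("sub\t$Rd, $Rn, $Rm") == 3);
  return 0;
}
```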

There are basically four changes this brings about on top of the previous
commits; all of them appear to be good, so I have updated the tests:

+ ARM64: we print "neg X, Y" instead of "sub X, xzr, Y".
+ ARM64: we skip implicit "uxtx" and "uxtw" modifiers.
+ Sparc: we print "mov A, B" instead of "or %g0, A, B".
+ Sparc: we print "fcmpX A, B" instead of "fcmpX %fcc0, A, B"

llvm-svn: 208969
Tim Northover 2014-05-16 09:42:04 +00:00
parent a52367cca6
commit 31e1362588
21 changed files with 124 additions and 243 deletions

View File

@ -1667,15 +1667,15 @@ multiclass AddSubS<bit isSub, string mnemonic, SDNode OpNode, string cmp> {
WZR, GPR32sp:$src, addsub_shifted_imm32:$imm)>;
def : InstAlias<cmp#" $src, $imm", (!cast<Instruction>(NAME#"Xri")
XZR, GPR64sp:$src, addsub_shifted_imm64:$imm)>;
def : InstAlias<cmp#" $src1, $src2, $sh", (!cast<Instruction>(NAME#"Wrx")
def : InstAlias<cmp#" $src1, $src2$sh", (!cast<Instruction>(NAME#"Wrx")
WZR, GPR32sp:$src1, GPR32:$src2, arith_extend:$sh)>;
def : InstAlias<cmp#" $src1, $src2, $sh", (!cast<Instruction>(NAME#"Xrx")
def : InstAlias<cmp#" $src1, $src2$sh", (!cast<Instruction>(NAME#"Xrx")
XZR, GPR64sp:$src1, GPR32:$src2, arith_extend:$sh)>;
def : InstAlias<cmp#" $src1, $src2, $sh", (!cast<Instruction>(NAME#"Xrx64")
def : InstAlias<cmp#" $src1, $src2$sh", (!cast<Instruction>(NAME#"Xrx64")
XZR, GPR64sp:$src1, GPR64:$src2, arith_extendlsl64:$sh)>;
def : InstAlias<cmp#" $src1, $src2, $sh", (!cast<Instruction>(NAME#"Wrs")
def : InstAlias<cmp#" $src1, $src2$sh", (!cast<Instruction>(NAME#"Wrs")
WZR, GPR32:$src1, GPR32:$src2, arith_shift32:$sh)>;
def : InstAlias<cmp#" $src1, $src2, $sh", (!cast<Instruction>(NAME#"Xrs")
def : InstAlias<cmp#" $src1, $src2$sh", (!cast<Instruction>(NAME#"Xrs")
XZR, GPR64:$src1, GPR64:$src2, arith_shift64:$sh)>;
// Compare shorthands

View File

@ -221,71 +221,6 @@ void ARM64InstPrinter::printInst(const MCInst *MI, raw_ostream &O,
return;
}
// FIXME: TableGen should be able to do all of these now.
// ANDS WZR, Wn, Wm{, lshift #imm} ==> TST Wn{, lshift #imm}
// ANDS XZR, Xn, Xm{, lshift #imm} ==> TST Xn{, lshift #imm}
if ((Opcode == ARM64::ANDSWrs && MI->getOperand(0).getReg() == ARM64::WZR) ||
(Opcode == ARM64::ANDSXrs && MI->getOperand(0).getReg() == ARM64::XZR)) {
O << "\ttst\t" << getRegisterName(MI->getOperand(1).getReg()) << ", ";
printShiftedRegister(MI, 2, O);
return;
}
// ORN Wn, WZR, Wm{, lshift #imm} ==> MVN Wn, Wm{, lshift #imm}
// ORN Xn, XZR, Xm{, lshift #imm} ==> MVN Xn, Xm{, lshift #imm}
if ((Opcode == ARM64::ORNWrs && MI->getOperand(1).getReg() == ARM64::WZR) ||
(Opcode == ARM64::ORNXrs && MI->getOperand(1).getReg() == ARM64::XZR)) {
O << "\tmvn\t" << getRegisterName(MI->getOperand(0).getReg()) << ", ";
printShiftedRegister(MI, 2, O);
return;
}
// SUBS WZR, Wn, Wm{, lshift #imm} ==> CMP Wn, Wm{, lshift #imm}
// SUBS XZR, Xn, Xm{, lshift #imm} ==> CMP Xn, Xm{, lshift #imm}
if ((Opcode == ARM64::SUBSWrs && MI->getOperand(0).getReg() == ARM64::WZR) ||
(Opcode == ARM64::SUBSXrs && MI->getOperand(0).getReg() == ARM64::XZR)) {
O << "\tcmp\t" << getRegisterName(MI->getOperand(1).getReg()) << ", ";
printShiftedRegister(MI, 2, O);
return;
}
// SUBS XZR, Xn, Wm, uxtb #imm ==> CMP Xn, uxtb #imm
// SUBS WZR, Wn, Xm, uxtb #imm ==> CMP Wn, uxtb #imm
if ((Opcode == ARM64::SUBSXrx && MI->getOperand(0).getReg() == ARM64::XZR) ||
(Opcode == ARM64::SUBSWrx && MI->getOperand(0).getReg() == ARM64::WZR)) {
O << "\tcmp\t" << getRegisterName(MI->getOperand(1).getReg()) << ", ";
printExtendedRegister(MI, 2, O);
return;
}
// SUBS XZR, Xn, Xm, uxtx #imm ==> CMP Xn, uxtb #imm
if (Opcode == ARM64::SUBSXrx64 && MI->getOperand(0).getReg() == ARM64::XZR) {
O << "\tcmp\t" << getRegisterName(MI->getOperand(1).getReg()) << ", "
<< getRegisterName(MI->getOperand(2).getReg());
printExtend(MI, 3, O);
return;
}
// ADDS WZR, Wn, Wm{, lshift #imm} ==> CMN Wn, Wm{, lshift #imm}
// ADDS XZR, Xn, Xm{, lshift #imm} ==> CMN Xn, Xm{, lshift #imm}
if ((Opcode == ARM64::ADDSWrs && MI->getOperand(0).getReg() == ARM64::WZR) ||
(Opcode == ARM64::ADDSXrs && MI->getOperand(0).getReg() == ARM64::XZR)) {
O << "\tcmn\t" << getRegisterName(MI->getOperand(1).getReg()) << ", ";
printShiftedRegister(MI, 2, O);
return;
}
// ADDS XZR, Xn, Wm, uxtb #imm ==> CMN Xn, uxtb #imm
if (Opcode == ARM64::ADDSXrx && MI->getOperand(0).getReg() == ARM64::XZR) {
O << "\tcmn\t" << getRegisterName(MI->getOperand(1).getReg()) << ", ";
printExtendedRegister(MI, 2, O);
return;
}
// ADDS XZR, Xn, Xm, uxtx #imm ==> CMN Xn, uxtb #imm
if (Opcode == ARM64::ADDSXrx64 && MI->getOperand(0).getReg() == ARM64::XZR) {
O << "\tcmn\t" << getRegisterName(MI->getOperand(1).getReg()) << ", "
<< getRegisterName(MI->getOperand(2).getReg());
printExtend(MI, 3, O);
return;
}
if (!printAliasInstr(MI, O))
printInstruction(MI, O);

View File

@ -15,6 +15,7 @@
#include "Sparc.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCInst.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/MC/MCSymbol.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;

View File

@ -1,5 +1,5 @@
; RUN: llc -verify-machineinstrs < %s -mtriple=aarch64-none-linux-gnu | FileCheck %s --check-prefix=CHECK --check-prefix=CHECK-AARCH64
; RUN: llc -verify-machineinstrs %s -o - -mtriple=arm64-apple-ios7.0 | FileCheck %s --check-prefix=CHECK --check-prefix=CHECK-ARM64
; RUN: llc -verify-machineinstrs < %s -mtriple=aarch64-none-linux-gnu | FileCheck %s
; RUN: llc -verify-machineinstrs %s -o - -mtriple=arm64-apple-ios7.0 | FileCheck %s
@var32 = global i32 0
@var64 = global i64 0
@ -36,8 +36,7 @@ define void @test_lsl_arith(i32 %lhs32, i32 %rhs32, i64 %lhs64, i64 %rhs64) {
%shift4a = shl i32 %lhs4a, 15
%val4a = sub i32 0, %shift4a
store volatile i32 %val4a, i32* @var32
; CHECK-AARCH64: neg {{w[0-9]+}}, {{w[0-9]+}}, lsl #15
; CHECK-ARM64: sub {{w[0-9]+}}, wzr, {{w[0-9]+}}, lsl #15
; CHECK: neg {{w[0-9]+}}, {{w[0-9]+}}, lsl #15
%rhs5 = load volatile i64* @var64
%shift5 = shl i64 %rhs5, 18
@ -68,8 +67,7 @@ define void @test_lsl_arith(i32 %lhs32, i32 %rhs32, i64 %lhs64, i64 %rhs64) {
%shift8a = shl i64 %lhs8a, 60
%val8a = sub i64 0, %shift8a
store volatile i64 %val8a, i64* @var64
; CHECK-AARCH64: neg {{x[0-9]+}}, {{x[0-9]+}}, lsl #60
; CHECK-ARM64: sub {{x[0-9]+}}, xzr, {{x[0-9]+}}, lsl #60
; CHECK: neg {{x[0-9]+}}, {{x[0-9]+}}, lsl #60
ret void
; CHECK: ret
@ -102,8 +100,7 @@ define void @test_lsr_arith(i32 %lhs32, i32 %rhs32, i64 %lhs64, i64 %rhs64) {
%shift4a = lshr i32 %lhs32, 15
%val4a = sub i32 0, %shift4a
store volatile i32 %val4a, i32* @var32
; CHECK-AARCH64: neg {{w[0-9]+}}, {{w[0-9]+}}, lsr #15
; CHECK-ARM64: sub {{w[0-9]+}}, wzr, {{w[0-9]+}}, lsr #15
; CHECK: neg {{w[0-9]+}}, {{w[0-9]+}}, lsr #15
%shift5 = lshr i64 %rhs64, 18
%val5 = add i64 %lhs64, %shift5
@ -129,8 +126,7 @@ define void @test_lsr_arith(i32 %lhs32, i32 %rhs32, i64 %lhs64, i64 %rhs64) {
%shift8a = lshr i64 %lhs64, 45
%val8a = sub i64 0, %shift8a
store volatile i64 %val8a, i64* @var64
; CHECK-AARCH64: neg {{x[0-9]+}}, {{x[0-9]+}}, lsr #45
; CHECK-ARM64: sub {{x[0-9]+}}, xzr, {{x[0-9]+}}, lsr #45
; CHECK: neg {{x[0-9]+}}, {{x[0-9]+}}, lsr #45
ret void
; CHECK: ret
@ -163,8 +159,7 @@ define void @test_asr_arith(i32 %lhs32, i32 %rhs32, i64 %lhs64, i64 %rhs64) {
%shift4a = ashr i32 %lhs32, 15
%val4a = sub i32 0, %shift4a
store volatile i32 %val4a, i32* @var32
; CHECK-AARCH64: neg {{w[0-9]+}}, {{w[0-9]+}}, asr #15
; CHECK-ARM64: sub {{w[0-9]+}}, wzr, {{w[0-9]+}}, asr #15
; CHECK: neg {{w[0-9]+}}, {{w[0-9]+}}, asr #15
%shift5 = ashr i64 %rhs64, 18
%val5 = add i64 %lhs64, %shift5
@ -190,8 +185,7 @@ define void @test_asr_arith(i32 %lhs32, i32 %rhs32, i64 %lhs64, i64 %rhs64) {
%shift8a = ashr i64 %lhs64, 45
%val8a = sub i64 0, %shift8a
store volatile i64 %val8a, i64* @var64
; CHECK-AARCH64: neg {{x[0-9]+}}, {{x[0-9]+}}, asr #45
; CHECK-ARM64: sub {{x[0-9]+}}, xzr, {{x[0-9]+}}, asr #45
; CHECK: neg {{x[0-9]+}}, {{x[0-9]+}}, asr #45
ret void
; CHECK: ret
@ -252,8 +246,7 @@ define i32 @test_cmn(i32 %lhs32, i32 %rhs32, i64 %lhs64, i64 %rhs64) {
br i1 %tst1, label %t2, label %end
; Important that this isn't lowered to a cmn instruction because if %rhs32 ==
; 0 then the results will differ.
; CHECK-AARCH64: neg [[RHS:w[0-9]+]], {{w[0-9]+}}, lsl #13
; CHECK-ARM64: sub [[RHS:w[0-9]+]], wzr, {{w[0-9]+}}, lsl #13
; CHECK: neg [[RHS:w[0-9]+]], {{w[0-9]+}}, lsl #13
; CHECK: cmp {{w[0-9]+}}, [[RHS]]
t2:
@ -276,8 +269,7 @@ t4:
%tst4 = icmp slt i64 %lhs64, %val4
br i1 %tst4, label %t5, label %end
; Again, it's important that cmn isn't used here in case %rhs64 == 0.
; CHECK-AARCH64: neg [[RHS:x[0-9]+]], {{x[0-9]+}}, lsl #43
; CHECK-ARM64: sub [[RHS:x[0-9]+]], xzr, {{x[0-9]+}}, lsl #43
; CHECK: neg [[RHS:x[0-9]+]], {{x[0-9]+}}, lsl #43
; CHECK: cmp {{x[0-9]+}}, [[RHS]]
t5:

View File

@ -156,7 +156,7 @@ define i64 @t17(i16 %a, i64 %x) nounwind ssp {
entry:
; CHECK-LABEL: t17:
; CHECK: sxth [[REG:x[0-9]+]], w0
; CHECK: sub x0, xzr, [[REG]], lsl #32
; CHECK: neg x0, [[REG]], lsl #32
; CHECK: ret
%tmp16 = sext i16 %a to i64
%tmp17 = mul i64 %tmp16, -4294967296

View File

@ -60,13 +60,13 @@ declare i8* @llvm.frameaddress(i32) nounwind readnone
define i8* @retaddr() nounwind readnone {
entry:
;V8-LABEL: retaddr:
;V8: or %g0, %o7, {{.+}}
;V8: mov %o7, {{.+}}
;V9-LABEL: retaddr:
;V9: or %g0, %o7, {{.+}}
;V9: mov %o7, {{.+}}
;SPARC64-LABEL: retaddr
;SPARC64: or %g0, %o7, {{.+}}
;SPARC64: mov %o7, {{.+}}
%0 = tail call i8* @llvm.returnaddress(i32 0)
ret i8* %0

View File

@ -60,7 +60,7 @@ entry:
;CHECK: !NO_APP
;CHECK-NEXT: cmp
;CHECK-NEXT: bg
;CHECK-NEXT: or
;CHECK-NEXT: mov
tail call void asm sideeffect "sethi 0, %g0", ""() nounwind
%0 = icmp slt i32 %a, 0
br i1 %0, label %bb, label %bb1

View File

@ -44,7 +44,7 @@ define void @intarg(i8 %a0, ; %i0
; CHECK: sra %i0, 0, [[R:%[gilo][0-7]]]
; CHECK: stx [[R]], [%sp+2223]
; Use %o0-%o5 for outgoing arguments
; CHECK: or %g0, 5, %o5
; CHECK: mov 5, %o5
; CHECK: call intarg
; CHECK-NOT: add %sp
; CHECK: restore
@ -208,7 +208,7 @@ define i32 @inreg_if(float inreg %a0, ; %f0
; CHECK: call_inreg_if
; CHECK: fmovs %f3, %f0
; CHECK: or %g0, %i2, %o0
; CHECK: mov %i2, %o0
; CHECK: call inreg_if
define void @call_inreg_if(i32* %p, float %f3, i32 %i2) {
%x = call i32 @inreg_if(float %f3, i32 %i2)

View File

@ -2,11 +2,11 @@
; RUN: llc < %s -march=sparcv9 -mattr=+popc | FileCheck %s -check-prefix=OPT
; CHECK-LABEL: ret2:
; CHECK: or %g0, %i1, %i0
; CHECK: mov %i1, %i0
; OPT-LABEL: ret2:
; OPT: retl
; OPT: or %g0, %o1, %o0
; OPT: mov %o1, %o0
define i64 @ret2(i64 %a, i64 %b) {
ret i64 %b
}
@ -39,21 +39,21 @@ define i64 @sra_reg(i64 %a, i64 %b) {
; restore %g0, %g0, %o0
;
; CHECK: ret_imm0
; CHECK: or %g0, 0, %i0
; CHECK: mov 0, %i0
; OPT: ret_imm0
; OPT: retl
; OPT: or %g0, 0, %o0
; OPT: mov 0, %o0
define i64 @ret_imm0() {
ret i64 0
}
; CHECK: ret_simm13
; CHECK: or %g0, -4096, %i0
; CHECK: mov -4096, %i0
; OPT: ret_simm13
; OPT: retl
; OPT: or %g0, -4096, %o0
; OPT: mov -4096, %o0
define i64 @ret_simm13() {
ret i64 -4096
}

View File

@ -112,9 +112,9 @@ entry:
; CHECK-LABEL: setcc_resultty
; CHECK-DAG: srax %i0, 63, %o0
; CHECK-DAG: or %g0, %i0, %o1
; CHECK-DAG: or %g0, 0, %o2
; CHECK-DAG: or %g0, 32, %o3
; CHECK-DAG: mov %i0, %o1
; CHECK-DAG: mov 0, %o2
; CHECK-DAG: mov 32, %o3
; CHECK-DAG: call __multi3
; CHECK: cmp
; CHECK: movne %xcc, 1, [[R:%[gilo][0-7]]]

View File

@ -33,7 +33,7 @@ entry:
}
; CHECK-LABEL: test_cmpxchg_i32
; CHECK: or %g0, 123, [[R:%[gilo][0-7]]]
; CHECK: mov 123, [[R:%[gilo][0-7]]]
; CHECK: cas [%o1], %o0, [[R]]
define i32 @test_cmpxchg_i32(i32 %a, i32* %ptr) {
@ -43,7 +43,7 @@ entry:
}
; CHECK-LABEL: test_cmpxchg_i64
; CHECK: or %g0, 123, [[R:%[gilo][0-7]]]
; CHECK: mov 123, [[R:%[gilo][0-7]]]
; CHECK: casx [%o1], %o0, [[R]]
define i64 @test_cmpxchg_i64(i64 %a, i64* %ptr) {
@ -53,7 +53,7 @@ entry:
}
; CHECK-LABEL: test_swap_i32
; CHECK: or %g0, 42, [[R:%[gilo][0-7]]]
; CHECK: mov 42, [[R:%[gilo][0-7]]]
; CHECK: swap [%o1], [[R]]
define i32 @test_swap_i32(i32 %a, i32* %ptr) {

View File

@ -11,7 +11,7 @@ entry:
; CHECK-LABEL: return_int_const:
; CHECK: retl
; CHECK-NEXT: or %g0, 1729, %o0
; CHECK-NEXT: mov 1729, %o0
define i32 @return_int_const() {
entry:
ret i32 1729
@ -58,9 +58,9 @@ entry:
; CHECK-LABEL: leaf_proc_with_local_array:
; CHECK: add %sp, -104, %sp
; CHECK: or %g0, 1, [[R1:%[go][0-7]]]
; CHECK: mov 1, [[R1:%[go][0-7]]]
; CHECK: st [[R1]], [%sp+96]
; CHECK: or %g0, 2, [[R2:%[go][0-7]]]
; CHECK: mov 2, [[R2:%[go][0-7]]]
; CHECK: st [[R2]], [%sp+100]
; CHECK: ld {{.+}}, %o0
; CHECK: retl

View File

@ -2,10 +2,10 @@
; CHECK-LABEL: test
; CHECK: srl %i1, 0, %o2
; CHECK-NEXT: or %g0, %i2, %o0
; CHECK-NEXT: mov %i2, %o0
; CHECK-NEXT: call __ashlti3
; CHECK-NEXT: or %g0, %i3, %o1
; CHECK-NEXT: or %g0, %o0, %i0
; CHECK-NEXT: mov %i3, %o1
; CHECK-NEXT: mov %o0, %i0
define i128 @test(i128 %a, i128 %b) {
entry:

View File

@ -27,7 +27,8 @@ _func:
// CHECK: add x2, x4, w5, uxtb // encoding: [0x82,0x00,0x25,0x8b]
// CHECK: add x20, sp, w19, uxth // encoding: [0xf4,0x23,0x33,0x8b]
// CHECK: add x12, x1, w20, uxtw // encoding: [0x2c,0x40,0x34,0x8b]
// CHECK: add x20, x3, x13, uxtx // encoding: [0x74,0x60,0x2d,0x8b]
// CHECK-AARCH64: add x20, x3, x13, uxtx // encoding: [0x74,0x60,0x2d,0x8b]
// CHECK-ARM64: add x20, x3, x13 // encoding: [0x74,0x60,0x2d,0x8b]
// CHECK: add x17, x25, w20, sxtb // encoding: [0x31,0x83,0x34,0x8b]
// CHECK: add x18, x13, w19, sxth // encoding: [0xb2,0xa1,0x33,0x8b]
// CHECK: add sp, x2, w3, sxtw // encoding: [0x5f,0xc0,0x23,0x8b]
@ -44,7 +45,8 @@ _func:
add w2, w3, w5, sxtx
// CHECK: add w2, w5, w7, uxtb // encoding: [0xa2,0x00,0x27,0x0b]
// CHECK: add w21, w15, w17, uxth // encoding: [0xf5,0x21,0x31,0x0b]
// CHECK: add w30, w29, wzr, uxtw // encoding: [0xbe,0x43,0x3f,0x0b]
// CHECK-AARCH64: add w30, w29, wzr, uxtw // encoding: [0xbe,0x43,0x3f,0x0b]
// CHECK-ARM64: add w30, w29, wzr // encoding: [0xbe,0x43,0x3f,0x0b]
// CHECK: add w19, w17, w1, uxtx // encoding: [0x33,0x62,0x21,0x0b]
// CHECK: add w2, w5, w1, sxtb // encoding: [0xa2,0x80,0x21,0x0b]
// CHECK: add w26, w17, w19, sxth // encoding: [0x3a,0xa2,0x33,0x0b]
@ -73,7 +75,8 @@ _func:
// CHECK: sub x2, x4, w5, uxtb #2 // encoding: [0x82,0x08,0x25,0xcb]
// CHECK: sub x20, sp, w19, uxth #4 // encoding: [0xf4,0x33,0x33,0xcb]
// CHECK: sub x12, x1, w20, uxtw // encoding: [0x2c,0x40,0x34,0xcb]
// CHECK: sub x20, x3, x13, uxtx // encoding: [0x74,0x60,0x2d,0xcb]
// CHECK-AARCH64: sub x20, x3, x13, uxtx // encoding: [0x74,0x60,0x2d,0xcb]
// CHECK-ARM64: sub x20, x3, x13 // encoding: [0x74,0x60,0x2d,0xcb]
// CHECK: sub x17, x25, w20, sxtb // encoding: [0x31,0x83,0x34,0xcb]
// CHECK: sub x18, x13, w19, sxth // encoding: [0xb2,0xa1,0x33,0xcb]
// CHECK: sub sp, x2, w3, sxtw // encoding: [0x5f,0xc0,0x23,0xcb]
@ -89,7 +92,8 @@ _func:
sub w2, w3, w5, sxtx
// CHECK: sub w2, w5, w7, uxtb // encoding: [0xa2,0x00,0x27,0x4b]
// CHECK: sub w21, w15, w17, uxth // encoding: [0xf5,0x21,0x31,0x4b]
// CHECK: sub w30, w29, wzr, uxtw // encoding: [0xbe,0x43,0x3f,0x4b]
// CHECK-AARCH64: sub w30, w29, wzr, uxtw // encoding: [0xbe,0x43,0x3f,0x4b]
// CHECK-ARM64: sub w30, w29, wzr // encoding: [0xbe,0x43,0x3f,0x4b]
// CHECK: sub w19, w17, w1, uxtx // encoding: [0x33,0x62,0x21,0x4b]
// CHECK: sub w2, w5, w1, sxtb // encoding: [0xa2,0x80,0x21,0x4b]
// CHECK: sub w26, wsp, w19, sxth // encoding: [0xfa,0xa3,0x33,0x4b]
@ -108,7 +112,8 @@ _func:
// CHECK: adds x2, x4, w5, uxtb #2 // encoding: [0x82,0x08,0x25,0xab]
// CHECK: adds x20, sp, w19, uxth #4 // encoding: [0xf4,0x33,0x33,0xab]
// CHECK: adds x12, x1, w20, uxtw // encoding: [0x2c,0x40,0x34,0xab]
// CHECK: adds x20, x3, x13, uxtx // encoding: [0x74,0x60,0x2d,0xab]
// CHECK-AARCH64: adds x20, x3, x13, uxtx // encoding: [0x74,0x60,0x2d,0xab]
// CHECK-ARM64: adds x20, x3, x13 // encoding: [0x74,0x60,0x2d,0xab]
// CHECK: {{adds xzr,|cmn}} x25, w20, sxtb #3 // encoding: [0x3f,0x8f,0x34,0xab]
// CHECK: adds x18, sp, w19, sxth // encoding: [0xf2,0xa3,0x33,0xab]
// CHECK: {{adds xzr,|cmn}} x2, w3, sxtw // encoding: [0x5f,0xc0,0x23,0xab]
@ -124,11 +129,13 @@ _func:
adds w2, w3, w5, sxtx
// CHECK: adds w2, w5, w7, uxtb // encoding: [0xa2,0x00,0x27,0x2b]
// CHECK: adds w21, w15, w17, uxth // encoding: [0xf5,0x21,0x31,0x2b]
// CHECK: adds w30, w29, wzr, uxtw // encoding: [0xbe,0x43,0x3f,0x2b]
// CHECK-AARCH64: adds w30, w29, wzr, uxtw // encoding: [0xbe,0x43,0x3f,0x2b]
// CHECK-ARM64: adds w30, w29, wzr // encoding: [0xbe,0x43,0x3f,0x2b]
// CHECK: adds w19, w17, w1, uxtx // encoding: [0x33,0x62,0x21,0x2b]
// CHECK: adds w2, w5, w1, sxtb #1 // encoding: [0xa2,0x84,0x21,0x2b]
// CHECK: adds w26, wsp, w19, sxth // encoding: [0xfa,0xa3,0x33,0x2b]
// CHECK: adds wzr, w2, w3, sxtw // encoding: [0x5f,0xc0,0x23,0x2b]
// CHECK-AARCH64: adds wzr, w2, w3, sxtw // encoding: [0x5f,0xc0,0x23,0x2b]
// CHECK-ARM64: cmn w2, w3, sxtw // encoding: [0x5f,0xc0,0x23,0x2b]
// CHECK: adds w2, w3, w5, sxtx // encoding: [0x62,0xe0,0x25,0x2b]
// subs
@ -143,7 +150,8 @@ _func:
// CHECK: subs x2, x4, w5, uxtb #2 // encoding: [0x82,0x08,0x25,0xeb]
// CHECK: subs x20, sp, w19, uxth #4 // encoding: [0xf4,0x33,0x33,0xeb]
// CHECK: subs x12, x1, w20, uxtw // encoding: [0x2c,0x40,0x34,0xeb]
// CHECK: subs x20, x3, x13, uxtx // encoding: [0x74,0x60,0x2d,0xeb]
// CHECK-AARCH64: subs x20, x3, x13, uxtx // encoding: [0x74,0x60,0x2d,0xeb]
// CHECK-ARM64: subs x20, x3, x13 // encoding: [0x74,0x60,0x2d,0xeb]
// CHECK: {{subs xzr,|cmp}} x25, w20, sxtb #3 // encoding: [0x3f,0x8f,0x34,0xeb]
// CHECK: subs x18, sp, w19, sxth // encoding: [0xf2,0xa3,0x33,0xeb]
// CHECK: {{subs xzr,|cmp}} x2, w3, sxtw // encoding: [0x5f,0xc0,0x23,0xeb]
@ -159,7 +167,8 @@ _func:
subs w2, w3, w5, sxtx
// CHECK: subs w2, w5, w7, uxtb // encoding: [0xa2,0x00,0x27,0x6b]
// CHECK: subs w21, w15, w17, uxth // encoding: [0xf5,0x21,0x31,0x6b]
// CHECK: subs w30, w29, wzr, uxtw // encoding: [0xbe,0x43,0x3f,0x6b]
// CHECK-AARCH64: subs w30, w29, wzr, uxtw // encoding: [0xbe,0x43,0x3f,0x6b]
// CHECK-ARM64: subs w30, w29, wzr // encoding: [0xbe,0x43,0x3f,0x6b]
// CHECK: subs w19, w17, w1, uxtx // encoding: [0x33,0x62,0x21,0x6b]
// CHECK: subs w2, w5, w1, sxtb #1 // encoding: [0xa2,0x84,0x21,0x6b]
// CHECK: subs w26, wsp, w19, sxth // encoding: [0xfa,0xa3,0x33,0x6b]
@ -256,7 +265,8 @@ _func:
// CHECK: sub sp, x3, x7, lsl #4 // encoding: [0x7f,0x70,0x27,0xcb]
// CHECK: add w2, wsp, w3, lsl #1 // encoding: [0xe2,0x47,0x23,0x0b]
// CHECK: cmp wsp, w9 // encoding: [0xff,0x43,0x29,0x6b]
// CHECK: adds wzr, wsp, w3, lsl #4 // encoding: [0xff,0x53,0x23,0x2b]
// CHECK-AARCH64: adds wzr, wsp, w3, lsl #4 // encoding: [0xff,0x53,0x23,0x2b]
// CHECK-ARM64: cmn wsp, w3, lsl #4 // encoding: [0xff,0x53,0x23,0x2b]
// CHECK: subs x3, sp, x9, lsl #2 // encoding: [0xe3,0x6b,0x29,0xeb]
//------------------------------------------------------------------------------
@ -735,30 +745,22 @@ _func:
// CHECK-AARCH64: neg w24, w23, lsl #31 // encoding: [0xf8,0x7f,0x17,0x4b]
// CHECK-ARM64: neg w28, w27 // encoding: [0xfc,0x03,0x1b,0x4b]
// CHECK-ARM64: sub w26, wzr, w25, lsl #29 // encoding: [0xfa,0x77,0x19,0x4b]
// CHECK-ARM64: sub w24, wzr, w23, lsl #31 // encoding: [0xf8,0x7f,0x17,0x4b]
// CHECK-ARM64: neg w26, w25, lsl #29 // encoding: [0xfa,0x77,0x19,0x4b]
// CHECK-ARM64: neg w24, w23, lsl #31 // encoding: [0xf8,0x7f,0x17,0x4b]
neg w22, w21, lsr #0
neg w20, w19, lsr #1
neg w18, w17, lsr #31
// CHECK-AARCH64: neg w22, w21, lsr #0 // encoding: [0xf6,0x03,0x55,0x4b]
// CHECK-AARCH64: neg w20, w19, lsr #1 // encoding: [0xf4,0x07,0x53,0x4b]
// CHECK-AARCH64: neg w18, w17, lsr #31 // encoding: [0xf2,0x7f,0x51,0x4b]
// CHECK-ARM64: sub w22, wzr, w21, lsr #0 // encoding: [0xf6,0x03,0x55,0x4b]
// CHECK-ARM64: sub w20, wzr, w19, lsr #1 // encoding: [0xf4,0x07,0x53,0x4b]
// CHECK-ARM64: sub w18, wzr, w17, lsr #31 // encoding: [0xf2,0x7f,0x51,0x4b]
// CHECK: neg w22, w21, lsr #0 // encoding: [0xf6,0x03,0x55,0x4b]
// CHECK: neg w20, w19, lsr #1 // encoding: [0xf4,0x07,0x53,0x4b]
// CHECK: neg w18, w17, lsr #31 // encoding: [0xf2,0x7f,0x51,0x4b]
neg w16, w15, asr #0
neg w14, w13, asr #12
neg w12, w11, asr #31
// CHECK-AARCH64: neg w16, w15, asr #0 // encoding: [0xf0,0x03,0x8f,0x4b]
// CHECK-AARCH64: neg w14, w13, asr #12 // encoding: [0xee,0x33,0x8d,0x4b]
// CHECK-AARCH64: neg w12, w11, asr #31 // encoding: [0xec,0x7f,0x8b,0x4b]
// CHECK-ARM64: sub w16, wzr, w15, asr #0 // encoding: [0xf0,0x03,0x8f,0x4b]
// CHECK-ARM64: sub w14, wzr, w13, asr #12 // encoding: [0xee,0x33,0x8d,0x4b]
// CHECK-ARM64: sub w12, wzr, w11, asr #31 // encoding: [0xec,0x7f,0x8b,0x4b]
// CHECK: neg w16, w15, asr #0 // encoding: [0xf0,0x03,0x8f,0x4b]
// CHECK: neg w14, w13, asr #12 // encoding: [0xee,0x33,0x8d,0x4b]
// CHECK: neg w12, w11, asr #31 // encoding: [0xec,0x7f,0x8b,0x4b]
neg x29, x30
neg x30, xzr
@ -778,30 +780,22 @@ _func:
// CHECK-AARCH64: neg x24, x23, lsl #31 // encoding: [0xf8,0x7f,0x17,0xcb]
// CHECK-ARM64: neg x28, x27 // encoding: [0xfc,0x03,0x1b,0xcb]
// CHECK-ARM64: sub x26, xzr, x25, lsl #29 // encoding: [0xfa,0x77,0x19,0xcb]
// CHECK-ARM64: sub x24, xzr, x23, lsl #31 // encoding: [0xf8,0x7f,0x17,0xcb]
// CHECK-ARM64: neg x26, x25, lsl #29 // encoding: [0xfa,0x77,0x19,0xcb]
// CHECK-ARM64: neg x24, x23, lsl #31 // encoding: [0xf8,0x7f,0x17,0xcb]
neg x22, x21, lsr #0
neg x20, x19, lsr #1
neg x18, x17, lsr #31
// CHECK-AARCH64: neg x22, x21, lsr #0 // encoding: [0xf6,0x03,0x55,0xcb]
// CHECK-AARCH64: neg x20, x19, lsr #1 // encoding: [0xf4,0x07,0x53,0xcb]
// CHECK-AARCH64: neg x18, x17, lsr #31 // encoding: [0xf2,0x7f,0x51,0xcb]
// CHECK-ARM64: sub x22, xzr, x21, lsr #0 // encoding: [0xf6,0x03,0x55,0xcb]
// CHECK-ARM64: sub x20, xzr, x19, lsr #1 // encoding: [0xf4,0x07,0x53,0xcb]
// CHECK-ARM64: sub x18, xzr, x17, lsr #31 // encoding: [0xf2,0x7f,0x51,0xcb]
// CHECK: neg x22, x21, lsr #0 // encoding: [0xf6,0x03,0x55,0xcb]
// CHECK: neg x20, x19, lsr #1 // encoding: [0xf4,0x07,0x53,0xcb]
// CHECK: neg x18, x17, lsr #31 // encoding: [0xf2,0x7f,0x51,0xcb]
neg x16, x15, asr #0
neg x14, x13, asr #12
neg x12, x11, asr #31
// CHECK-AARCH64: neg x16, x15, asr #0 // encoding: [0xf0,0x03,0x8f,0xcb]
// CHECK-AARCH64: neg x14, x13, asr #12 // encoding: [0xee,0x33,0x8d,0xcb]
// CHECK-AARCH64: neg x12, x11, asr #31 // encoding: [0xec,0x7f,0x8b,0xcb]
// CHECK-ARM64: sub x16, xzr, x15, asr #0 // encoding: [0xf0,0x03,0x8f,0xcb]
// CHECK-ARM64: sub x14, xzr, x13, asr #12 // encoding: [0xee,0x33,0x8d,0xcb]
// CHECK-ARM64: sub x12, xzr, x11, asr #31 // encoding: [0xec,0x7f,0x8b,0xcb]
// CHECK: neg x16, x15, asr #0 // encoding: [0xf0,0x03,0x8f,0xcb]
// CHECK: neg x14, x13, asr #12 // encoding: [0xee,0x33,0x8d,0xcb]
// CHECK: neg x12, x11, asr #31 // encoding: [0xec,0x7f,0x8b,0xcb]
negs w29, w30
negs w30, wzr
@ -821,30 +815,22 @@ _func:
// CHECK-AARCH64: negs w24, w23, lsl #31 // encoding: [0xf8,0x7f,0x17,0x6b]
// CHECK-ARM64: negs w28, w27 // encoding: [0xfc,0x03,0x1b,0x6b]
// CHECK-ARM64: subs w26, wzr, w25, lsl #29 // encoding: [0xfa,0x77,0x19,0x6b]
// CHECK-ARM64: subs w24, wzr, w23, lsl #31 // encoding: [0xf8,0x7f,0x17,0x6b]
// CHECK-ARM64: negs w26, w25, lsl #29 // encoding: [0xfa,0x77,0x19,0x6b]
// CHECK-ARM64: negs w24, w23, lsl #31 // encoding: [0xf8,0x7f,0x17,0x6b]
negs w22, w21, lsr #0
negs w20, w19, lsr #1
negs w18, w17, lsr #31
// CHECK-AARCH64: negs w22, w21, lsr #0 // encoding: [0xf6,0x03,0x55,0x6b]
// CHECK-AARCH64: negs w20, w19, lsr #1 // encoding: [0xf4,0x07,0x53,0x6b]
// CHECK-AARCH64: negs w18, w17, lsr #31 // encoding: [0xf2,0x7f,0x51,0x6b]
// CHECK-ARM64: subs w22, wzr, w21, lsr #0 // encoding: [0xf6,0x03,0x55,0x6b]
// CHECK-ARM64: subs w20, wzr, w19, lsr #1 // encoding: [0xf4,0x07,0x53,0x6b]
// CHECK-ARM64: subs w18, wzr, w17, lsr #31 // encoding: [0xf2,0x7f,0x51,0x6b]
// CHECK: negs w22, w21, lsr #0 // encoding: [0xf6,0x03,0x55,0x6b]
// CHECK: negs w20, w19, lsr #1 // encoding: [0xf4,0x07,0x53,0x6b]
// CHECK: negs w18, w17, lsr #31 // encoding: [0xf2,0x7f,0x51,0x6b]
negs w16, w15, asr #0
negs w14, w13, asr #12
negs w12, w11, asr #31
// CHECK-AARCH64: negs w16, w15, asr #0 // encoding: [0xf0,0x03,0x8f,0x6b]
// CHECK-AARCH64: negs w14, w13, asr #12 // encoding: [0xee,0x33,0x8d,0x6b]
// CHECK-AARCH64: negs w12, w11, asr #31 // encoding: [0xec,0x7f,0x8b,0x6b]
// CHECK-ARM64: subs w16, wzr, w15, asr #0 // encoding: [0xf0,0x03,0x8f,0x6b]
// CHECK-ARM64: subs w14, wzr, w13, asr #12 // encoding: [0xee,0x33,0x8d,0x6b]
// CHECK-ARM64: subs w12, wzr, w11, asr #31 // encoding: [0xec,0x7f,0x8b,0x6b]
// CHECK: negs w16, w15, asr #0 // encoding: [0xf0,0x03,0x8f,0x6b]
// CHECK: negs w14, w13, asr #12 // encoding: [0xee,0x33,0x8d,0x6b]
// CHECK: negs w12, w11, asr #31 // encoding: [0xec,0x7f,0x8b,0x6b]
negs x29, x30
negs x30, xzr
@ -864,30 +850,22 @@ _func:
// CHECK-AARCH64: negs x24, x23, lsl #31 // encoding: [0xf8,0x7f,0x17,0xeb]
// CHECK-ARM64: negs x28, x27 // encoding: [0xfc,0x03,0x1b,0xeb]
// CHECK-ARM64: subs x26, xzr, x25, lsl #29 // encoding: [0xfa,0x77,0x19,0xeb]
// CHECK-ARM64: subs x24, xzr, x23, lsl #31 // encoding: [0xf8,0x7f,0x17,0xeb]
// CHECK-ARM64: negs x26, x25, lsl #29 // encoding: [0xfa,0x77,0x19,0xeb]
// CHECK-ARM64: negs x24, x23, lsl #31 // encoding: [0xf8,0x7f,0x17,0xeb]
negs x22, x21, lsr #0
negs x20, x19, lsr #1
negs x18, x17, lsr #31
// CHECK-AARCH64: negs x22, x21, lsr #0 // encoding: [0xf6,0x03,0x55,0xeb]
// CHECK-AARCH64: negs x20, x19, lsr #1 // encoding: [0xf4,0x07,0x53,0xeb]
// CHECK-AARCH64: negs x18, x17, lsr #31 // encoding: [0xf2,0x7f,0x51,0xeb]
// CHECK-ARM64: subs x22, xzr, x21, lsr #0 // encoding: [0xf6,0x03,0x55,0xeb]
// CHECK-ARM64: subs x20, xzr, x19, lsr #1 // encoding: [0xf4,0x07,0x53,0xeb]
// CHECK-ARM64: subs x18, xzr, x17, lsr #31 // encoding: [0xf2,0x7f,0x51,0xeb]
// CHECK: negs x22, x21, lsr #0 // encoding: [0xf6,0x03,0x55,0xeb]
// CHECK: negs x20, x19, lsr #1 // encoding: [0xf4,0x07,0x53,0xeb]
// CHECK: negs x18, x17, lsr #31 // encoding: [0xf2,0x7f,0x51,0xeb]
negs x16, x15, asr #0
negs x14, x13, asr #12
negs x12, x11, asr #31
// CHECK-AARCH64: negs x16, x15, asr #0 // encoding: [0xf0,0x03,0x8f,0xeb]
// CHECK-AARCH64: negs x14, x13, asr #12 // encoding: [0xee,0x33,0x8d,0xeb]
// CHECK-AARCH64: negs x12, x11, asr #31 // encoding: [0xec,0x7f,0x8b,0xeb]
// CHECK-ARM64: subs x16, xzr, x15, asr #0 // encoding: [0xf0,0x03,0x8f,0xeb]
// CHECK-ARM64: subs x14, xzr, x13, asr #12 // encoding: [0xee,0x33,0x8d,0xeb]
// CHECK-ARM64: subs x12, xzr, x11, asr #31 // encoding: [0xec,0x7f,0x8b,0xeb]
// CHECK: negs x16, x15, asr #0 // encoding: [0xf0,0x03,0x8f,0xeb]
// CHECK: negs x14, x13, asr #12 // encoding: [0xee,0x33,0x8d,0xeb]
// CHECK: negs x12, x11, asr #31 // encoding: [0xec,0x7f,0x8b,0xeb]
//------------------------------------------------------------------------------
// Add-sub (shifted register)

View File

@ -113,19 +113,19 @@ foo:
neg w0, w1
; CHECK: neg w0, w1
neg w0, w1, lsl #1
; CHECK: sub w0, wzr, w1, lsl #1
; CHECK: neg w0, w1, lsl #1
neg x0, x1
; CHECK: neg x0, x1
neg x0, x1, asr #1
; CHECK: sub x0, xzr, x1, asr #1
; CHECK: neg x0, x1, asr #1
negs w0, w1
; CHECK: negs w0, w1
negs w0, w1, lsl #1
; CHECK: subs w0, wzr, w1, lsl #1
; CHECK: negs w0, w1, lsl #1
negs x0, x1
; CHECK: negs x0, x1
negs x0, x1, asr #1
; CHECK: subs x0, xzr, x1, asr #1
; CHECK: negs x0, x1, asr #1
;-----------------------------------------------------------------------------
; MOV aliases

View File

@ -178,7 +178,7 @@ foo:
; CHECK: add w1, w2, w3, uxtb ; encoding: [0x41,0x00,0x23,0x0b]
; CHECK: add w1, w2, w3, uxth ; encoding: [0x41,0x20,0x23,0x0b]
; CHECK: add w1, w2, w3, uxtw ; encoding: [0x41,0x40,0x23,0x0b]
; CHECK: add w1, w2, w3 ; encoding: [0x41,0x40,0x23,0x0b]
; CHECK: add w1, w2, w3, uxtx ; encoding: [0x41,0x60,0x23,0x0b]
; CHECK: add w1, w2, w3, sxtb ; encoding: [0x41,0x80,0x23,0x0b]
; CHECK: add w1, w2, w3, sxth ; encoding: [0x41,0xa0,0x23,0x0b]
@ -222,7 +222,7 @@ foo:
; CHECK: sub w1, w2, w3, uxtb ; encoding: [0x41,0x00,0x23,0x4b]
; CHECK: sub w1, w2, w3, uxth ; encoding: [0x41,0x20,0x23,0x4b]
; CHECK: sub w1, w2, w3, uxtw ; encoding: [0x41,0x40,0x23,0x4b]
; CHECK: sub w1, w2, w3 ; encoding: [0x41,0x40,0x23,0x4b]
; CHECK: sub w1, w2, w3, uxtx ; encoding: [0x41,0x60,0x23,0x4b]
; CHECK: sub w1, w2, w3, sxtb ; encoding: [0x41,0x80,0x23,0x4b]
; CHECK: sub w1, w2, w3, sxth ; encoding: [0x41,0xa0,0x23,0x4b]
@ -266,7 +266,7 @@ foo:
; CHECK: adds w1, w2, w3, uxtb ; encoding: [0x41,0x00,0x23,0x2b]
; CHECK: adds w1, w2, w3, uxth ; encoding: [0x41,0x20,0x23,0x2b]
; CHECK: adds w1, w2, w3, uxtw ; encoding: [0x41,0x40,0x23,0x2b]
; CHECK: adds w1, w2, w3 ; encoding: [0x41,0x40,0x23,0x2b]
; CHECK: adds w1, w2, w3, uxtx ; encoding: [0x41,0x60,0x23,0x2b]
; CHECK: adds w1, w2, w3, sxtb ; encoding: [0x41,0x80,0x23,0x2b]
; CHECK: adds w1, w2, w3, sxth ; encoding: [0x41,0xa0,0x23,0x2b]
@ -297,7 +297,7 @@ foo:
; CHECK: adds w1, wsp, w3 ; encoding: [0xe1,0x43,0x23,0x2b]
; CHECK: adds w1, wsp, w3 ; encoding: [0xe1,0x43,0x23,0x2b]
; CHECK: adds wzr, wsp, w3, lsl #4 ; encoding: [0xff,0x53,0x23,0x2b]
; CHECK: cmn wsp, w3, lsl #4 ; encoding: [0xff,0x53,0x23,0x2b]
subs w1, w2, w3, uxtb
subs w1, w2, w3, uxth
@ -310,7 +310,7 @@ foo:
; CHECK: subs w1, w2, w3, uxtb ; encoding: [0x41,0x00,0x23,0x6b]
; CHECK: subs w1, w2, w3, uxth ; encoding: [0x41,0x20,0x23,0x6b]
; CHECK: subs w1, w2, w3, uxtw ; encoding: [0x41,0x40,0x23,0x6b]
; CHECK: subs w1, w2, w3 ; encoding: [0x41,0x40,0x23,0x6b]
; CHECK: subs w1, w2, w3, uxtx ; encoding: [0x41,0x60,0x23,0x6b]
; CHECK: subs w1, w2, w3, sxtb ; encoding: [0x41,0x80,0x23,0x6b]
; CHECK: subs w1, w2, w3, sxth ; encoding: [0x41,0xa0,0x23,0x6b]

View File

@ -172,7 +172,7 @@
# CHECK: add w1, w2, w3, uxtb
# CHECK: add w1, w2, w3, uxth
# CHECK: add w1, w2, w3, uxtw
# CHECK: add w1, w2, w3
# CHECK: add w1, w2, w3, uxtx
# CHECK: add w1, w2, w3, sxtb
# CHECK: add w1, w2, w3, sxth
@ -214,7 +214,7 @@
# CHECK: sub w1, w2, w3, uxtb
# CHECK: sub w1, w2, w3, uxth
# CHECK: sub w1, w2, w3, uxtw
# CHECK: sub w1, w2, w3
# CHECK: sub w1, w2, w3, uxtx
# CHECK: sub w1, w2, w3, sxtb
# CHECK: sub w1, w2, w3, sxth
@ -256,7 +256,7 @@
# CHECK: adds w1, w2, w3, uxtb
# CHECK: adds w1, w2, w3, uxth
# CHECK: adds w1, w2, w3, uxtw
# CHECK: adds w1, w2, w3
# CHECK: adds w1, w2, w3, uxtx
# CHECK: adds w1, w2, w3, sxtb
# CHECK: adds w1, w2, w3, sxth
@ -294,7 +294,7 @@
# CHECK: subs w1, w2, w3, uxtb
# CHECK: subs w1, w2, w3, uxth
# CHECK: subs w1, w2, w3, uxtw
# CHECK: subs w1, w2, w3
# CHECK: subs w1, w2, w3, uxtx
# CHECK: subs w1, w2, w3, sxtb
# CHECK: subs w1, w2, w3, sxth

View File

@ -120,13 +120,13 @@
# CHECK: fdivq %f0, %f4, %f8
0x91 0xa0 0x09 0xe4
# CHECK: fcmps %fcc0, %f0, %f4
# CHECK: fcmps %f0, %f4
0x81 0xa8 0x0a 0x24
# CHECK: fcmpd %fcc0, %f0, %f4
# CHECK: fcmpd %f0, %f4
0x81 0xa8 0x0a 0x44
# CHECK: fcmpq %fcc0, %f0, %f4
# CHECK: fcmpq %f0, %f4
0x81 0xa8 0x0a 0x64
# CHECK: fxtos %f0, %f4

View File

@ -70,10 +70,10 @@
! CHECK: subxcc %g1, %g2, %g3 ! encoding: [0x86,0xe0,0x40,0x02]
subxcc %g1, %g2, %g3
! CHECK: or %g0, %g1, %g3 ! encoding: [0x86,0x10,0x00,0x01]
! CHECK: mov %g1, %g3 ! encoding: [0x86,0x10,0x00,0x01]
mov %g1, %g3
! CHECK: or %g0, 255, %g3 ! encoding: [0x86,0x10,0x20,0xff]
! CHECK: mov 255, %g3 ! encoding: [0x86,0x10,0x20,0xff]
mov 0xff, %g3
! CHECK: restore ! encoding: [0x81,0xe8,0x00,0x00]

View File

@ -96,16 +96,16 @@
fdivd %f0, %f4, %f8
fdivq %f0, %f4, %f8
! CHECK: fcmps %fcc0, %f0, %f4 ! encoding: [0x81,0xa8,0x0a,0x24]
! CHECK: fcmpd %fcc0, %f0, %f4 ! encoding: [0x81,0xa8,0x0a,0x44]
! CHECK: fcmpq %fcc0, %f0, %f4 ! encoding: [0x81,0xa8,0x0a,0x64]
! CHECK: fcmps %f0, %f4 ! encoding: [0x81,0xa8,0x0a,0x24]
! CHECK: fcmpd %f0, %f4 ! encoding: [0x81,0xa8,0x0a,0x44]
! CHECK: fcmpq %f0, %f4 ! encoding: [0x81,0xa8,0x0a,0x64]
fcmps %f0, %f4
fcmpd %f0, %f4
fcmpq %f0, %f4
! CHECK: fcmpes %fcc0, %f0, %f4 ! encoding: [0x81,0xa8,0x0a,0xa4]
! CHECK: fcmped %fcc0, %f0, %f4 ! encoding: [0x81,0xa8,0x0a,0xc4]
! CHECK: fcmpeq %fcc0, %f0, %f4 ! encoding: [0x81,0xa8,0x0a,0xe4]
! CHECK: fcmpes %f0, %f4 ! encoding: [0x81,0xa8,0x0a,0xa4]
! CHECK: fcmped %f0, %f4 ! encoding: [0x81,0xa8,0x0a,0xc4]
! CHECK: fcmpeq %f0, %f4 ! encoding: [0x81,0xa8,0x0a,0xe4]
fcmpes %f0, %f4
fcmped %f0, %f4
fcmpeq %f0, %f4

View File

@ -742,37 +742,12 @@ public:
} // end anonymous namespace
static unsigned CountNumOperands(StringRef AsmString) {
unsigned NumOps = 0;
std::pair<StringRef, StringRef> ASM = AsmString.split(' ');
static unsigned CountNumOperands(StringRef AsmString, unsigned Variant) {
std::string FlatAsmString =
CodeGenInstruction::FlattenAsmStringVariants(AsmString, Variant);
AsmString = FlatAsmString;
while (!ASM.second.empty()) {
++NumOps;
ASM = ASM.second.split(' ');
}
return NumOps;
}
static unsigned CountResultNumOperands(StringRef AsmString) {
unsigned NumOps = 0;
std::pair<StringRef, StringRef> ASM = AsmString.split('\t');
if (!ASM.second.empty()) {
size_t I = ASM.second.find('{');
StringRef Str = ASM.second;
if (I != StringRef::npos)
Str = ASM.second.substr(I, ASM.second.find('|', I));
ASM = Str.split(' ');
do {
++NumOps;
ASM = ASM.second.split(' ');
} while (!ASM.second.empty());
}
return NumOps;
return AsmString.count(' ') + AsmString.count('\t');
}
void AsmWriterEmitter::EmitPrintAliasInstruction(raw_ostream &O) {
@ -818,10 +793,10 @@ void AsmWriterEmitter::EmitPrintAliasInstruction(raw_ostream &O) {
const CodeGenInstAlias *CGA = *II;
unsigned LastOpNo = CGA->ResultInstOperandIndex.size();
unsigned NumResultOps =
CountResultNumOperands(CGA->ResultInst->AsmString);
CountNumOperands(CGA->ResultInst->AsmString, Variant);
// Don't emit the alias if it has more operands than what it's aliasing.
if (NumResultOps < CountNumOperands(CGA->AsmString))
if (NumResultOps < CountNumOperands(CGA->AsmString, Variant))
continue;
IAPrinter *IAP = new IAPrinter(CGA->Result->getAsString(),