Add an operand to memory intrinsics to denote the "tail" marker.
We need to propagate this information from the IR in order to be able to safely do tail call optimizations on the intrinsics during legalization. Performing the tail call optimization without checking for the marker is unsafe, because the mem libcall may use allocas from the caller. This adds an extra immediate operand to the end of the intrinsics and fixes the legalizer to handle it.

Differential Revision: https://reviews.llvm.org/D68151

llvm-svn: 373140
This commit is contained in:
parent 76f44f6b53
commit 509a4947c9

Changed files:
llvm/lib/CodeGen
llvm/test/CodeGen/AArch64/GlobalISel/arm64-irtranslator.ll
llvm/test/CodeGen/AArch64/GlobalISel/inline-memcpy.mir
llvm/test/CodeGen/AArch64/GlobalISel/inline-memmove.mir
llvm/test/CodeGen/AArch64/GlobalISel/inline-memset.mir
llvm/test/CodeGen/AArch64/GlobalISel/inline-small-memcpy.mir
llvm/test/CodeGen/AArch64/GlobalISel/legalize-memcpy-et-al.mir
llvm/test/CodeGen/AArch64/GlobalISel/memcpy_chk_no_tail.ll
llvm/test/CodeGen/Mips/GlobalISel
llvm/test/CodeGen/X86/GlobalISel
llvm/test/MachineVerifier
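To make the shape of the change concrete before the diff itself, here is a small self-contained C++ sketch. It is a toy model, not the LLVM API, and every name in it is illustrative: the producer (modeling IRTranslator::translateMemFunc) appends the IR 'tail' marker as a trailing immediate operand, and the consumer (modeling createMemLibcall) treats every operand except that trailing immediate as a call argument, using the immediate only to gate tail-call emission.

#include <cassert>
#include <cstdint>
#include <iostream>
#include <vector>

// One operand of a toy generic machine instruction: either an immediate
// value or a virtual register id.
struct Operand {
  bool IsImm;
  int64_t Imm;
  int Reg;
};

// Producer side: after the pointer and size registers, append the IR 'tail'
// marker as a trailing immediate (the operand this patch adds).
std::vector<Operand> buildMemcpyOps(int Dst, int Src, int Size, bool IRTail) {
  std::vector<Operand> Ops;
  Ops.push_back({false, 0, Dst});
  Ops.push_back({false, 0, Src});
  Ops.push_back({false, 0, Size});
  Ops.push_back({true, IRTail ? 1 : 0, 0});
  return Ops;
}

// Consumer side: all but the last operand become libcall arguments; the
// trailing immediate only decides whether a tail call may be emitted.
bool emitAsTailCall(const std::vector<Operand> &Ops, bool InTailPosition) {
  assert(Ops.back().IsImm && "expected trailing tail-marker immediate");
  return Ops.back().Imm == 1 && InTailPosition;
}

int main() {
  auto Ops = buildMemcpyOps(0, 1, 2, /*IRTail=*/true);
  std::cout << emitAsTailCall(Ops, true) << '\n';  // 1: tail call is allowed
  std::cout << emitAsTailCall(Ops, false) << '\n'; // 0: ordinary call
}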
@@ -1143,6 +1143,11 @@ bool IRTranslator::translateMemFunc(const CallInst &CI,
     DstAlign = std::max<unsigned>(MSI->getDestAlignment(), 1);
   }
 
+  // We need to propagate the tail call flag from the IR inst as an argument.
+  // Otherwise, we have to pessimize and assume later that we cannot tail call
+  // any memory intrinsics.
+  ICall.addImm(CI.isTailCall() ? 1 : 0);
+
   // Create mem operands to store the alignment and volatile info.
   auto VolFlag = IsVol ? MachineMemOperand::MOVolatile : MachineMemOperand::MONone;
   ICall.addMemOperand(MF->getMachineMemOperand(
@@ -395,7 +395,8 @@ llvm::createMemLibcall(MachineIRBuilder &MIRBuilder, MachineRegisterInfo &MRI,
   auto &Ctx = MIRBuilder.getMF().getFunction().getContext();
 
   SmallVector<CallLowering::ArgInfo, 3> Args;
-  for (unsigned i = 1; i < MI.getNumOperands(); i++) {
+  // Add all the args, except for the last which is an imm denoting 'tail'.
+  for (unsigned i = 1; i < MI.getNumOperands() - 1; i++) {
     Register Reg = MI.getOperand(i).getReg();
 
     // Need derive an IR type for call lowering.
@@ -433,7 +434,8 @@ llvm::createMemLibcall(MachineIRBuilder &MIRBuilder, MachineRegisterInfo &MRI,
   Info.CallConv = TLI.getLibcallCallingConv(RTLibcall);
   Info.Callee = MachineOperand::CreateES(Name);
   Info.OrigRet = CallLowering::ArgInfo({0}, Type::getVoidTy(Ctx));
-  Info.IsTailCall = isLibCallInTailPosition(MI);
+  Info.IsTailCall = MI.getOperand(MI.getNumOperands() - 1).getImm() == 1 &&
+                    isLibCallInTailPosition(MI);
 
   std::copy(Args.begin(), Args.end(), std::back_inserter(Info.OrigArgs));
   if (!CLI.lowerCall(MIRBuilder, Info))
@@ -1368,6 +1368,20 @@ void MachineVerifier::verifyPreISelGenericInstruction(const MachineInstr *MI) {
         break;
       }
     }
+    switch (IntrID) {
+    case Intrinsic::memcpy:
+      if (MI->getNumOperands() != 5)
+        report("Expected memcpy intrinsic to have 5 operands", MI);
+      break;
+    case Intrinsic::memmove:
+      if (MI->getNumOperands() != 5)
+        report("Expected memmove intrinsic to have 5 operands", MI);
+      break;
+    case Intrinsic::memset:
+      if (MI->getNumOperands() != 5)
+        report("Expected memset intrinsic to have 5 operands", MI);
+      break;
+    }
     break;
   }
  case TargetOpcode::G_SEXT_INREG: {
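The verifier change encodes the new contract: after this patch the G_INTRINSIC_W_SIDE_EFFECTS form of memcpy, memmove, and memset always carries five operands, since the count includes the intrinsic ID operand as well as destination, source or value, size, and the new tail-marker immediate. Here is a stand-alone sketch of that rule with toy types standing in for MachineInstr; the names are illustrative only.

#include <cstdio>
#include <string>

// A toy instruction: the intrinsic name plus its total operand count.
struct ToyInst {
  std::string Intrinsic;
  unsigned NumOperands;
};

void verifyMemIntrinsic(const ToyInst &MI) {
  if (MI.Intrinsic == "memcpy" || MI.Intrinsic == "memmove" ||
      MI.Intrinsic == "memset") {
    // id + dst + src/value + size + tail marker = 5 operands.
    if (MI.NumOperands != 5)
      std::printf("Bad machine code: Expected %s intrinsic to have 5 operands\n",
                  MI.Intrinsic.c_str());
  }
}

int main() {
  verifyMemIntrinsic({"memcpy", 4}); // missing tail marker: the check fires
  verifyMemIntrinsic({"memcpy", 5}); // well-formed: silent
}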
@@ -1134,18 +1134,28 @@ define void @test_memcpy(i8* %dst, i8* %src, i64 %size) {
 ; CHECK: [[DST:%[0-9]+]]:_(p0) = COPY $x0
 ; CHECK: [[SRC:%[0-9]+]]:_(p0) = COPY $x1
 ; CHECK: [[SIZE:%[0-9]+]]:_(s64) = COPY $x2
-; CHECK: G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.memcpy), [[DST]](p0), [[SRC]](p0), [[SIZE]](s64) :: (store 1 into %ir.dst), (load 1 from %ir.src)
+; CHECK: G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.memcpy), [[DST]](p0), [[SRC]](p0), [[SIZE]](s64), 0 :: (store 1 into %ir.dst), (load 1 from %ir.src)
   call void @llvm.memcpy.p0i8.p0i8.i64(i8* %dst, i8* %src, i64 %size, i1 0)
   ret void
 }
 
+define void @test_memcpy_tail(i8* %dst, i8* %src, i64 %size) {
+; CHECK-LABEL: name: test_memcpy_tail
+; CHECK: [[DST:%[0-9]+]]:_(p0) = COPY $x0
+; CHECK: [[SRC:%[0-9]+]]:_(p0) = COPY $x1
+; CHECK: [[SIZE:%[0-9]+]]:_(s64) = COPY $x2
+; CHECK: G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.memcpy), [[DST]](p0), [[SRC]](p0), [[SIZE]](s64), 1 :: (store 1 into %ir.dst), (load 1 from %ir.src)
+  tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %dst, i8* %src, i64 %size, i1 0)
+  ret void
+}
+
 declare void @llvm.memcpy.p1i8.p1i8.i64(i8 addrspace(1)*, i8 addrspace(1)*, i64, i1)
 define void @test_memcpy_nonzero_as(i8 addrspace(1)* %dst, i8 addrspace(1) * %src, i64 %size) {
 ; CHECK-LABEL: name: test_memcpy_nonzero_as
 ; CHECK: [[DST:%[0-9]+]]:_(p1) = COPY $x0
 ; CHECK: [[SRC:%[0-9]+]]:_(p1) = COPY $x1
 ; CHECK: [[SIZE:%[0-9]+]]:_(s64) = COPY $x2
-; CHECK: G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.memcpy), [[DST]](p1), [[SRC]](p1), [[SIZE]](s64) :: (store 1 into %ir.dst, addrspace 1), (load 1 from %ir.src, addrspace 1)
+; CHECK: G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.memcpy), [[DST]](p1), [[SRC]](p1), [[SIZE]](s64), 0 :: (store 1 into %ir.dst, addrspace 1), (load 1 from %ir.src, addrspace 1)
   call void @llvm.memcpy.p1i8.p1i8.i64(i8 addrspace(1)* %dst, i8 addrspace(1)* %src, i64 %size, i1 0)
   ret void
 }
@@ -1156,7 +1166,7 @@ define void @test_memmove(i8* %dst, i8* %src, i64 %size) {
 ; CHECK: [[DST:%[0-9]+]]:_(p0) = COPY $x0
 ; CHECK: [[SRC:%[0-9]+]]:_(p0) = COPY $x1
 ; CHECK: [[SIZE:%[0-9]+]]:_(s64) = COPY $x2
-; CHECK: G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.memmove), [[DST]](p0), [[SRC]](p0), [[SIZE]](s64) :: (store 1 into %ir.dst), (load 1 from %ir.src)
+; CHECK: G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.memmove), [[DST]](p0), [[SRC]](p0), [[SIZE]](s64), 0 :: (store 1 into %ir.dst), (load 1 from %ir.src)
   call void @llvm.memmove.p0i8.p0i8.i64(i8* %dst, i8* %src, i64 %size, i1 0)
   ret void
 }
@@ -1168,7 +1178,7 @@ define void @test_memset(i8* %dst, i8 %val, i64 %size) {
 ; CHECK: [[SRC_C:%[0-9]+]]:_(s32) = COPY $w1
 ; CHECK: [[SRC:%[0-9]+]]:_(s8) = G_TRUNC [[SRC_C]]
 ; CHECK: [[SIZE:%[0-9]+]]:_(s64) = COPY $x2
-; CHECK: G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.memset), [[DST]](p0), [[SRC]](s8), [[SIZE]](s64) :: (store 1 into %ir.dst)
+; CHECK: G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.memset), [[DST]](p0), [[SRC]](s8), [[SIZE]](s64), 0 :: (store 1 into %ir.dst)
   call void @llvm.memset.p0i8.i64(i8* %dst, i8 %val, i64 %size, i1 0)
   ret void
 }
@@ -52,12 +52,12 @@ body: |
    ; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
    ; CHECK: [[COPY1:%[0-9]+]]:_(p0) = COPY $x1
    ; CHECK: [[COPY2:%[0-9]+]]:_(s64) = COPY $x2
-    ; CHECK: G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.memcpy), [[COPY]](p0), [[COPY1]](p0), [[COPY2]](s64) :: (store 1 into %ir.0, align 4), (load 1 from %ir.1, align 4)
+    ; CHECK: G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.memcpy), [[COPY]](p0), [[COPY1]](p0), [[COPY2]](s64), 1 :: (store 1 into %ir.0, align 4), (load 1 from %ir.1, align 4)
    ; CHECK: RET_ReallyLR
    %0:_(p0) = COPY $x0
    %1:_(p0) = COPY $x1
    %2:_(s64) = COPY $x2
-    G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.memcpy), %0(p0), %1(p0), %2(s64) :: (store 1 into %ir.0, align 4), (load 1 from %ir.1, align 4)
+    G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.memcpy), %0(p0), %1(p0), %2(s64), 1 :: (store 1 into %ir.0, align 4), (load 1 from %ir.1, align 4)
    RET_ReallyLR
 
 ...
@@ -104,7 +104,7 @@ body: |
    %0:_(p0) = COPY $x0
    %1:_(p0) = COPY $x1
    %2:_(s64) = G_CONSTANT i64 72
-    G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.memcpy), %0(p0), %1(p0), %2(s64) :: (store 1 into %ir.0, align 4), (load 1 from %ir.1, align 4)
+    G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.memcpy), %0(p0), %1(p0), %2(s64), 1 :: (store 1 into %ir.0, align 4), (load 1 from %ir.1, align 4)
    RET_ReallyLR
 
 ...
@@ -171,7 +171,7 @@ body: |
    %0:_(p0) = COPY $x0
    %1:_(p0) = COPY $x1
    %2:_(s64) = G_CONSTANT i64 143
-    G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.memcpy), %0(p0), %1(p0), %2(s64) :: (store 1 into %ir.0, align 4), (load 1 from %ir.1, align 4)
+    G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.memcpy), %0(p0), %1(p0), %2(s64), 1 :: (store 1 into %ir.0, align 4), (load 1 from %ir.1, align 4)
    RET_ReallyLR
 
 ...
@@ -55,12 +55,12 @@ body: |
    ; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
    ; CHECK: [[COPY1:%[0-9]+]]:_(p0) = COPY $x1
    ; CHECK: [[COPY2:%[0-9]+]]:_(s64) = COPY $x2
-    ; CHECK: G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.memmove), [[COPY]](p0), [[COPY1]](p0), [[COPY2]](s64) :: (store 1 into %ir.0, align 4), (load 1 from %ir.1, align 4)
+    ; CHECK: G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.memmove), [[COPY]](p0), [[COPY1]](p0), [[COPY2]](s64), 1 :: (store 1 into %ir.0, align 4), (load 1 from %ir.1, align 4)
    ; CHECK: RET_ReallyLR
    %0:_(p0) = COPY $x0
    %1:_(p0) = COPY $x1
    %2:_(s64) = COPY $x2
-    G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.memmove), %0(p0), %1(p0), %2(s64) :: (store 1 into %ir.0, align 4), (load 1 from %ir.1, align 4)
+    G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.memmove), %0(p0), %1(p0), %2(s64), 1 :: (store 1 into %ir.0, align 4), (load 1 from %ir.1, align 4)
    RET_ReallyLR
 
 ...
@@ -94,7 +94,7 @@ body: |
    %0:_(p0) = COPY $x0
    %1:_(p0) = COPY $x1
    %2:_(s64) = G_CONSTANT i64 48
-    G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.memmove), %0(p0), %1(p0), %2(s64) :: (store 1 into %ir.0, align 4), (load 1 from %ir.1, align 4)
+    G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.memmove), %0(p0), %1(p0), %2(s64), 1 :: (store 1 into %ir.0, align 4), (load 1 from %ir.1, align 4)
    RET_ReallyLR
 
 ...
@@ -111,12 +111,12 @@ body: |
    ; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
    ; CHECK: [[COPY1:%[0-9]+]]:_(p0) = COPY $x1
    ; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 96
-    ; CHECK: G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.memmove), [[COPY]](p0), [[COPY1]](p0), [[C]](s64) :: (store 1 into %ir.0, align 4), (load 1 from %ir.1, align 4)
+    ; CHECK: G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.memmove), [[COPY]](p0), [[COPY1]](p0), [[C]](s64), 1 :: (store 1 into %ir.0, align 4), (load 1 from %ir.1, align 4)
    ; CHECK: RET_ReallyLR
    %0:_(p0) = COPY $x0
    %1:_(p0) = COPY $x1
    %2:_(s64) = G_CONSTANT i64 96
-    G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.memmove), %0(p0), %1(p0), %2(s64) :: (store 1 into %ir.0, align 4), (load 1 from %ir.1, align 4)
+    G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.memmove), %0(p0), %1(p0), %2(s64), 1 :: (store 1 into %ir.0, align 4), (load 1 from %ir.1, align 4)
    RET_ReallyLR
 
 ...
@@ -156,7 +156,7 @@ body: |
    %0:_(p0) = COPY $x0
    %1:_(p0) = COPY $x1
    %2:_(s64) = G_CONSTANT i64 52
-    G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.memmove), %0(p0), %1(p0), %2(s64) :: (store 1 into %ir.0, align 4), (load 1 from %ir.1, align 4)
+    G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.memmove), %0(p0), %1(p0), %2(s64), 1 :: (store 1 into %ir.0, align 4), (load 1 from %ir.1, align 4)
    RET_ReallyLR
 
 ...
@@ -54,14 +54,14 @@ body: |
    ; CHECK: [[COPY2:%[0-9]+]]:_(s32) = COPY $w2
    ; CHECK: [[TRUNC:%[0-9]+]]:_(s8) = G_TRUNC [[COPY1]](s32)
    ; CHECK: [[ZEXT:%[0-9]+]]:_(s64) = G_ZEXT [[COPY2]](s32)
-    ; CHECK: G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.memset), [[COPY]](p0), [[TRUNC]](s8), [[ZEXT]](s64) :: (store 1 into %ir.dst)
+    ; CHECK: G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.memset), [[COPY]](p0), [[TRUNC]](s8), [[ZEXT]](s64), 1 :: (store 1 into %ir.dst)
    ; CHECK: RET_ReallyLR
    %0:_(p0) = COPY $x0
    %1:_(s32) = COPY $w1
    %2:_(s32) = COPY $w2
    %3:_(s8) = G_TRUNC %1(s32)
    %4:_(s64) = G_ZEXT %2(s32)
-    G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.memset), %0(p0), %3(s8), %4(s64) :: (store 1 into %ir.dst)
+    G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.memset), %0(p0), %3(s8), %4(s64), 1 :: (store 1 into %ir.dst)
    RET_ReallyLR
 
 ...
@@ -90,7 +90,7 @@ body: |
    %1:_(s32) = COPY $w1
    %3:_(s64) = G_CONSTANT i64 16
    %2:_(s8) = G_TRUNC %1(s32)
-    G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.memset), %0(p0), %2(s8), %3(s64) :: (store 1 into %ir.dst)
+    G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.memset), %0(p0), %2(s8), %3(s64), 1 :: (store 1 into %ir.dst)
    RET_ReallyLR
 
 ...
@@ -114,7 +114,7 @@ body: |
    %0:_(p0) = COPY $x0
    %1:_(s8) = G_CONSTANT i8 64
    %2:_(s64) = G_CONSTANT i64 16
-    G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.memset), %0(p0), %1(s8), %2(s64) :: (store 1 into %ir.dst)
+    G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.memset), %0(p0), %1(s8), %2(s64), 1 :: (store 1 into %ir.dst)
    RET_ReallyLR
 
 ...
@@ -142,7 +142,7 @@ body: |
    %0:_(p0) = COPY $x0
    %1:_(s8) = G_CONSTANT i8 64
    %2:_(s64) = G_CONSTANT i64 18
-    G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.memset), %0(p0), %1(s8), %2(s64) :: (store 1 into %ir.dst)
+    G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.memset), %0(p0), %1(s8), %2(s64), 1 :: (store 1 into %ir.dst)
    RET_ReallyLR
 
 ...
@@ -53,7 +53,7 @@ body: |
    %0:_(p0) = COPY $x0
    %1:_(p0) = COPY $x1
    %2:_(s64) = G_CONSTANT i64 32
-    G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.memcpy), %0(p0), %1(p0), %2(s64) :: (store 1 into %ir.0, align 4), (load 1 from %ir.1, align 4)
+    G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.memcpy), %0(p0), %1(p0), %2(s64), 1 :: (store 1 into %ir.0, align 4), (load 1 from %ir.1, align 4)
    RET_ReallyLR
 
 ...
@@ -75,12 +75,12 @@ body: |
    ; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
    ; CHECK: [[COPY1:%[0-9]+]]:_(p0) = COPY $x1
    ; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 36
-    ; CHECK: G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.memcpy), [[COPY]](p0), [[COPY1]](p0), [[C]](s64) :: (store 1 into %ir.0, align 4), (load 1 from %ir.1, align 4)
+    ; CHECK: G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.memcpy), [[COPY]](p0), [[COPY1]](p0), [[C]](s64), 1 :: (store 1 into %ir.0, align 4), (load 1 from %ir.1, align 4)
    ; CHECK: RET_ReallyLR
    %0:_(p0) = COPY $x0
    %1:_(p0) = COPY $x1
    %2:_(s64) = G_CONSTANT i64 36
-    G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.memcpy), %0(p0), %1(p0), %2(s64) :: (store 1 into %ir.0, align 4), (load 1 from %ir.1, align 4)
+    G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.memcpy), %0(p0), %1(p0), %2(s64), 1 :: (store 1 into %ir.0, align 4), (load 1 from %ir.1, align 4)
    RET_ReallyLR
 
 ...
@@ -13,20 +13,43 @@ body: |
    ; CHECK: [[COPY1:%[0-9]+]]:_(p0) = COPY $x1
    ; CHECK: [[COPY2:%[0-9]+]]:_(s32) = COPY $w2
    ; CHECK: [[ZEXT:%[0-9]+]]:_(s64) = G_ZEXT [[COPY2]](s32)
    ; CHECK: ADJCALLSTACKDOWN 0, 0, implicit-def $sp, implicit $sp
    ; CHECK: $x0 = COPY [[COPY]](p0)
    ; CHECK: $x1 = COPY [[COPY1]](p0)
    ; CHECK: $x2 = COPY [[ZEXT]](s64)
-    ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
-    ; CHECK: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
-    ; CHECK: [[AND:%[0-9]+]]:_(s32) = G_AND [[C1]], [[C]]
-    ; CHECK: $w3 = COPY [[AND]](s32)
-    ; CHECK: TCRETURNdi &memcpy, 0, csr_aarch64_aapcs, implicit $sp, implicit $x0, implicit $x1, implicit $x2, implicit $w3
+    ; CHECK: BL &memcpy, csr_aarch64_aapcs, implicit-def $lr, implicit $sp, implicit $x0, implicit $x1, implicit $x2
+    ; CHECK: ADJCALLSTACKUP 0, 0, implicit-def $sp, implicit $sp
+    ; CHECK: RET_ReallyLR
    %0:_(p0) = COPY $x0
    %1:_(p0) = COPY $x1
    %2:_(s32) = COPY $w2
-    %4:_(s1) = G_CONSTANT i1 false
    %3:_(s64) = G_ZEXT %2(s32)
-    G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.memcpy), %0(p0), %1(p0), %3(s64), %4(s1)
+    G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.memcpy), %0(p0), %1(p0), %3(s64), 0
    RET_ReallyLR
 
 ...
+---
+name: test_memcpy_tail
+tracksRegLiveness: true
+body: |
+  bb.1:
+    liveins: $w2, $x0, $x1
+
+    ; CHECK-LABEL: name: test_memcpy_tail
+    ; CHECK: liveins: $w2, $x0, $x1
+    ; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
+    ; CHECK: [[COPY1:%[0-9]+]]:_(p0) = COPY $x1
+    ; CHECK: [[COPY2:%[0-9]+]]:_(s32) = COPY $w2
+    ; CHECK: [[ZEXT:%[0-9]+]]:_(s64) = G_ZEXT [[COPY2]](s32)
+    ; CHECK: $x0 = COPY [[COPY]](p0)
+    ; CHECK: $x1 = COPY [[COPY1]](p0)
+    ; CHECK: $x2 = COPY [[ZEXT]](s64)
+    ; CHECK: TCRETURNdi &memcpy, 0, csr_aarch64_aapcs, implicit $sp, implicit $x0, implicit $x1, implicit $x2
+    %0:_(p0) = COPY $x0
+    %1:_(p0) = COPY $x1
+    %2:_(s32) = COPY $w2
+    %3:_(s64) = G_ZEXT %2(s32)
+    G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.memcpy), %0(p0), %1(p0), %3(s64), 1
+    RET_ReallyLR
+
+...
@@ -43,20 +66,18 @@ body: |
    ; CHECK: [[COPY1:%[0-9]+]]:_(p0) = COPY $x1
    ; CHECK: [[COPY2:%[0-9]+]]:_(s32) = COPY $w2
    ; CHECK: [[ZEXT:%[0-9]+]]:_(s64) = G_ZEXT [[COPY2]](s32)
    ; CHECK: ADJCALLSTACKDOWN 0, 0, implicit-def $sp, implicit $sp
    ; CHECK: $x0 = COPY [[COPY]](p0)
    ; CHECK: $x1 = COPY [[COPY1]](p0)
    ; CHECK: $x2 = COPY [[ZEXT]](s64)
-    ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
-    ; CHECK: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
-    ; CHECK: [[AND:%[0-9]+]]:_(s32) = G_AND [[C1]], [[C]]
-    ; CHECK: $w3 = COPY [[AND]](s32)
-    ; CHECK: TCRETURNdi &memmove, 0, csr_aarch64_aapcs, implicit $sp, implicit $x0, implicit $x1, implicit $x2, implicit $w3
+    ; CHECK: BL &memmove, csr_aarch64_aapcs, implicit-def $lr, implicit $sp, implicit $x0, implicit $x1, implicit $x2
+    ; CHECK: ADJCALLSTACKUP 0, 0, implicit-def $sp, implicit $sp
+    ; CHECK: RET_ReallyLR
    %0:_(p0) = COPY $x0
    %1:_(p0) = COPY $x1
    %2:_(s32) = COPY $w2
-    %4:_(s1) = G_CONSTANT i1 false
    %3:_(s64) = G_ZEXT %2(s32)
-    G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.memmove), %0(p0), %1(p0), %3(s64), %4(s1)
+    G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.memmove), %0(p0), %1(p0), %3(s64), 0
    RET_ReallyLR
 
 ...
@@ -73,22 +94,20 @@ body: |
    ; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY $w1
    ; CHECK: [[COPY2:%[0-9]+]]:_(s32) = COPY $w2
    ; CHECK: [[ZEXT:%[0-9]+]]:_(s64) = G_ZEXT [[COPY2]](s32)
    ; CHECK: ADJCALLSTACKDOWN 0, 0, implicit-def $sp, implicit $sp
    ; CHECK: $x0 = COPY [[COPY]](p0)
    ; CHECK: [[COPY3:%[0-9]+]]:_(s32) = COPY [[COPY1]](s32)
    ; CHECK: $w1 = COPY [[COPY3]](s32)
    ; CHECK: $x2 = COPY [[ZEXT]](s64)
-    ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
-    ; CHECK: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
-    ; CHECK: [[AND:%[0-9]+]]:_(s32) = G_AND [[C1]], [[C]]
-    ; CHECK: $w3 = COPY [[AND]](s32)
-    ; CHECK: TCRETURNdi &memset, 0, csr_aarch64_aapcs, implicit $sp, implicit $x0, implicit $w1, implicit $x2, implicit $w3
+    ; CHECK: BL &memset, csr_aarch64_aapcs, implicit-def $lr, implicit $sp, implicit $x0, implicit $w1, implicit $x2
+    ; CHECK: ADJCALLSTACKUP 0, 0, implicit-def $sp, implicit $sp
+    ; CHECK: RET_ReallyLR
    %0:_(p0) = COPY $x0
    %1:_(s32) = COPY $w1
    %2:_(s32) = COPY $w2
-    %5:_(s1) = G_CONSTANT i1 false
    %3:_(s8) = G_TRUNC %1(s32)
    %4:_(s64) = G_ZEXT %2(s32)
-    G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.memset), %0(p0), %3(s8), %4(s64), %5(s1)
+    G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.memset), %0(p0), %3(s8), %4(s64), 0
    RET_ReallyLR
 
 ...
@@ -109,20 +128,15 @@ body: |
    ; CHECK: $x0 = COPY [[COPY]](p0)
    ; CHECK: $x1 = COPY [[COPY1]](p0)
    ; CHECK: $x2 = COPY [[ZEXT]](s64)
-    ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
-    ; CHECK: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
-    ; CHECK: [[AND:%[0-9]+]]:_(s32) = G_AND [[C1]], [[C]]
-    ; CHECK: $w3 = COPY [[AND]](s32)
-    ; CHECK: BL &memcpy, csr_aarch64_aapcs, implicit-def $lr, implicit $sp, implicit $x0, implicit $x1, implicit $x2, implicit $w3
+    ; CHECK: BL &memcpy, csr_aarch64_aapcs, implicit-def $lr, implicit $sp, implicit $x0, implicit $x1, implicit $x2
    ; CHECK: ADJCALLSTACKUP 0, 0, implicit-def $sp, implicit $sp
    ; CHECK: $x0 = COPY [[ZEXT]](s64)
    ; CHECK: RET_ReallyLR implicit $x0
    %0:_(p0) = COPY $x0
    %1:_(p0) = COPY $x1
    %2:_(s32) = COPY $w2
-    %4:_(s1) = G_CONSTANT i1 false
    %3:_(s64) = G_ZEXT %2(s32)
-    G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.memcpy), %0(p0), %1(p0), %3(s64), %4(s1)
+    G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.memcpy), %0(p0), %1(p0), %3(s64), 1
    $x0 = COPY %3
    RET_ReallyLR implicit $x0
 
@@ -143,11 +157,7 @@ body: |
    ; CHECK: $x0 = COPY [[COPY]](p0)
    ; CHECK: $x1 = COPY [[COPY1]](p0)
    ; CHECK: $x2 = COPY [[ZEXT]](s64)
-    ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
-    ; CHECK: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
-    ; CHECK: [[AND:%[0-9]+]]:_(s32) = G_AND [[C1]], [[C]]
-    ; CHECK: $w3 = COPY [[AND]](s32)
-    ; CHECK: BL &memcpy, csr_aarch64_aapcs, implicit-def $lr, implicit $sp, implicit $x0, implicit $x1, implicit $x2, implicit $w3
+    ; CHECK: BL &memcpy, csr_aarch64_aapcs, implicit-def $lr, implicit $sp, implicit $x0, implicit $x1, implicit $x2
    ; CHECK: ADJCALLSTACKUP 0, 0, implicit-def $sp, implicit $sp
    ; CHECK: TCRETURNdi &memset, 0, csr_aarch64_aapcs, implicit $sp
    %0:_(p0) = COPY $x0
@@ -155,5 +165,5 @@ body: |
    %2:_(s32) = COPY $w2
-    %4:_(s1) = G_CONSTANT i1 false
    %3:_(s64) = G_ZEXT %2(s32)
-    G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.memcpy), %0(p0), %1(p0), %3(s64), %4(s1)
+    G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.memcpy), %0(p0), %1(p0), %3(s64), 1
    TCRETURNdi &memset, 0, csr_aarch64_aapcs, implicit $sp
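The legalizer test updates above exercise both halves of the new condition: a marker of 0 lowers to a plain BL even in tail position, a marker of 1 in tail position lowers to TCRETURNdi, and a marker of 1 whose size value is reused after the call stays a BL because isLibCallInTailPosition fails. A minimal sketch of that decision with illustrative names:

#include <iostream>

// A mem libcall becomes a true tail call only when BOTH the propagated IR
// marker and the structural tail-position check agree.
const char *lowering(bool TailMarker, bool InTailPosition) {
  return (TailMarker && InTailPosition) ? "TCRETURNdi" : "BL";
}

int main() {
  std::cout << lowering(false, true) << '\n'; // marker 0: BL
  std::cout << lowering(true, true) << '\n';  // marker 1, tail position: TCRETURNdi
  std::cout << lowering(true, false) << '\n'; // marker 1, result used later: BL
}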
llvm/test/CodeGen/AArch64/GlobalISel/memcpy_chk_no_tail.ll (new file, 30 lines)
@@ -0,0 +1,30 @@
+; RUN: llc -global-isel -verify-machineinstrs %s -o - | FileCheck %s
+
+target datalayout = "e-m:o-i64:64-i128:128-n32:64-S128"
+target triple = "arm64-apple-ios13.0.0"
+
+%struct.int_sqrt = type { i32, i32 }
+
+; Function Attrs: nounwind optsize ssp uwtable
+; CHECK-LABEL: @usqrt
+; CHECK-NOT: b memcpy
+; CHECK: bl _memcpy
+define void @usqrt(i32 %x, %struct.int_sqrt* %q) local_unnamed_addr #0 {
+  %a = alloca i32, align 4
+  %bc = bitcast i32* %a to i8*
+  %bc2 = bitcast %struct.int_sqrt* %q to i8*
+  %obj = tail call i64 @llvm.objectsize.i64.p0i8(i8* %bc2, i1 false, i1 true, i1 false)
+  %call = call i8* @__memcpy_chk(i8* %bc2, i8* nonnull %bc, i64 1000, i64 %obj) #4
+  ret void
+}
+
+; Function Attrs: nofree nounwind optsize
+declare i8* @__memcpy_chk(i8*, i8*, i64, i64) local_unnamed_addr #2
+
+; Function Attrs: nounwind readnone speculatable willreturn
+declare i64 @llvm.objectsize.i64.p0i8(i8*, i1 immarg, i1 immarg, i1 immarg) #3
+attributes #0 = { optsize "disable-tail-calls"="false" "frame-pointer"="all" }
+attributes #2 = { nofree nounwind "disable-tail-calls"="false" "frame-pointer"="all" }
+attributes #3 = { nounwind readnone speculatable willreturn }
+attributes #4 = { nounwind optsize }
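This new test pins down the motivation from the commit message: __memcpy_chk copies out of %a, an alloca in usqrt's frame, so the lowered memcpy must stay an ordinary call (bl) rather than a tail call (b). A compile-only C++ sketch of the hazard, with illustrative names that do not come from the test:

#include <cstring>

// Illustrative only: why a mem libcall that touches the caller's allocas
// must not be tail-called. A tail call deallocates and reuses the caller's
// stack frame before jumping to the callee, so memcpy would read 'buf'
// after the storage backing it is gone.
void copy_out(char *dst) {
  char buf[1000] = {0};              // lives in copy_out's stack frame
  std::memcpy(dst, buf, sizeof buf); // must be 'bl memcpy', not 'b memcpy'
}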
@@ -153,7 +153,7 @@ define void @call_symbol(i8* nocapture readonly %src, i8* nocapture %dest, i32 s
 ; MIPS32: [[COPY:%[0-9]+]]:_(p0) = COPY $a0
 ; MIPS32: [[COPY1:%[0-9]+]]:_(p0) = COPY $a1
 ; MIPS32: [[COPY2:%[0-9]+]]:_(s32) = COPY $a2
-; MIPS32: G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.memcpy), [[COPY1]](p0), [[COPY]](p0), [[COPY2]](s32) :: (store 1 into %ir.dest), (load 1 from %ir.src)
+; MIPS32: G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.memcpy), [[COPY1]](p0), [[COPY]](p0), [[COPY2]](s32), 0 :: (store 1 into %ir.dest), (load 1 from %ir.src)
 ; MIPS32: RetRA
 ; MIPS32_PIC-LABEL: name: call_symbol
 ; MIPS32_PIC: bb.1.entry:
@@ -161,7 +161,7 @@ define void @call_symbol(i8* nocapture readonly %src, i8* nocapture %dest, i32 s
 ; MIPS32_PIC: [[COPY:%[0-9]+]]:_(p0) = COPY $a0
 ; MIPS32_PIC: [[COPY1:%[0-9]+]]:_(p0) = COPY $a1
 ; MIPS32_PIC: [[COPY2:%[0-9]+]]:_(s32) = COPY $a2
-; MIPS32_PIC: G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.memcpy), [[COPY1]](p0), [[COPY]](p0), [[COPY2]](s32) :: (store 1 into %ir.dest), (load 1 from %ir.src)
+; MIPS32_PIC: G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.memcpy), [[COPY1]](p0), [[COPY]](p0), [[COPY2]](s32), 0 :: (store 1 into %ir.dest), (load 1 from %ir.src)
 ; MIPS32_PIC: RetRA
 entry:
   call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 1 %dest, i8* align 1 %src, i32 %length, i1 false)
@@ -73,7 +73,7 @@ body: |
    %8:_(s32) = G_CONSTANT i32 -8
    %9:_(s32) = G_AND %7, %8
    %10:_(p0) = G_DYN_STACKALLOC %9(s32), 0
-    G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.memset), %10(p0), %0(s8), %1(s32) :: (store 1 into %ir.vla)
+    G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.memset), %10(p0), %0(s8), %1(s32), 0 :: (store 1 into %ir.vla)
    %11:_(p0) = G_GEP %10, %1(s32)
    %12:_(p0) = COPY %11(p0)
    G_STORE %13(s8), %12(p0) :: (store 1 into %ir.arrayidx)
@@ -19,7 +19,7 @@ define float @test_return_f1(float %f.coerce) {
 ; ALL: [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %stack.0.retval
 ; ALL: [[FRAME_INDEX1:%[0-9]+]]:_(p0) = G_FRAME_INDEX %stack.1.f
 ; ALL: G_STORE [[TRUNC]](s32), [[FRAME_INDEX1]](p0) :: (store 4 into %ir.coerce.dive2)
-; ALL: G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.memcpy), [[FRAME_INDEX]](p0), [[FRAME_INDEX1]](p0), [[C]](s64) :: (store 1 into %ir.0, align 4), (load 1 from %ir.1, align 4)
+; ALL: G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.memcpy), [[FRAME_INDEX]](p0), [[FRAME_INDEX1]](p0), [[C]](s64), 0 :: (store 1 into %ir.0, align 4), (load 1 from %ir.1, align 4)
 ; ALL: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX]](p0) :: (load 4 from %ir.coerce.dive13)
 ; ALL: [[ANYEXT:%[0-9]+]]:_(s128) = G_ANYEXT [[LOAD]](s32)
 ; ALL: $xmm0 = COPY [[ANYEXT]](s128)
@@ -49,7 +49,7 @@ define double @test_return_d1(double %d.coerce) {
 ; ALL: [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %stack.0.retval
 ; ALL: [[FRAME_INDEX1:%[0-9]+]]:_(p0) = G_FRAME_INDEX %stack.1.d
 ; ALL: G_STORE [[TRUNC]](s64), [[FRAME_INDEX1]](p0) :: (store 8 into %ir.coerce.dive2)
-; ALL: G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.memcpy), [[FRAME_INDEX]](p0), [[FRAME_INDEX1]](p0), [[C]](s64) :: (store 1 into %ir.0, align 8), (load 1 from %ir.1, align 8)
+; ALL: G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.memcpy), [[FRAME_INDEX]](p0), [[FRAME_INDEX1]](p0), [[C]](s64), 0 :: (store 1 into %ir.0, align 8), (load 1 from %ir.1, align 8)
 ; ALL: [[LOAD:%[0-9]+]]:_(s64) = G_LOAD [[FRAME_INDEX]](p0) :: (load 8 from %ir.coerce.dive13)
 ; ALL: [[ANYEXT:%[0-9]+]]:_(s128) = G_ANYEXT [[LOAD]](s64)
 ; ALL: $xmm0 = COPY [[ANYEXT]](s128)
@@ -82,7 +82,7 @@ define { double, double } @test_return_d2(double %d.coerce0, double %d.coerce1)
 ; ALL: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
 ; ALL: [[GEP:%[0-9]+]]:_(p0) = G_GEP [[FRAME_INDEX1]], [[C1]](s64)
 ; ALL: G_STORE [[TRUNC1]](s64), [[GEP]](p0) :: (store 8 into %ir.2)
-; ALL: G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.memcpy), [[FRAME_INDEX]](p0), [[FRAME_INDEX1]](p0), [[C]](s64) :: (store 1 into %ir.3, align 8), (load 1 from %ir.4, align 8)
+; ALL: G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.memcpy), [[FRAME_INDEX]](p0), [[FRAME_INDEX1]](p0), [[C]](s64), 0 :: (store 1 into %ir.3, align 8), (load 1 from %ir.4, align 8)
 ; ALL: [[LOAD:%[0-9]+]]:_(s64) = G_LOAD [[FRAME_INDEX]](p0) :: (load 8 from %ir.5)
 ; ALL: [[GEP1:%[0-9]+]]:_(p0) = G_GEP [[FRAME_INDEX]], [[C1]](s64)
 ; ALL: [[LOAD1:%[0-9]+]]:_(s64) = G_LOAD [[GEP1]](p0) :: (load 8 from %ir.5 + 8)
@@ -116,7 +116,7 @@ define i32 @test_return_i1(i32 %i.coerce) {
 ; ALL: [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %stack.0.retval
 ; ALL: [[FRAME_INDEX1:%[0-9]+]]:_(p0) = G_FRAME_INDEX %stack.1.i
 ; ALL: G_STORE [[COPY]](s32), [[FRAME_INDEX1]](p0) :: (store 4 into %ir.coerce.dive2)
-; ALL: G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.memcpy), [[FRAME_INDEX]](p0), [[FRAME_INDEX1]](p0), [[C]](s64) :: (store 1 into %ir.0, align 4), (load 1 from %ir.1, align 4)
+; ALL: G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.memcpy), [[FRAME_INDEX]](p0), [[FRAME_INDEX1]](p0), [[C]](s64), 0 :: (store 1 into %ir.0, align 4), (load 1 from %ir.1, align 4)
 ; ALL: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX]](p0) :: (load 4 from %ir.coerce.dive13)
 ; ALL: $eax = COPY [[LOAD]](s32)
 ; ALL: RET 0, implicit $eax
@@ -142,7 +142,7 @@ define i64 @test_return_i2(i64 %i.coerce) {
 ; ALL: [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %stack.0.retval
 ; ALL: [[FRAME_INDEX1:%[0-9]+]]:_(p0) = G_FRAME_INDEX %stack.1.i
 ; ALL: G_STORE [[COPY]](s64), [[FRAME_INDEX1]](p0) :: (store 8 into %ir.0, align 4)
-; ALL: G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.memcpy), [[FRAME_INDEX]](p0), [[FRAME_INDEX1]](p0), [[C]](s64) :: (store 1 into %ir.1, align 4), (load 1 from %ir.2, align 4)
+; ALL: G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.memcpy), [[FRAME_INDEX]](p0), [[FRAME_INDEX1]](p0), [[C]](s64), 0 :: (store 1 into %ir.1, align 4), (load 1 from %ir.2, align 4)
 ; ALL: [[LOAD:%[0-9]+]]:_(s64) = G_LOAD [[FRAME_INDEX]](p0) :: (load 8 from %ir.3, align 4)
 ; ALL: $rax = COPY [[LOAD]](s64)
 ; ALL: RET 0, implicit $rax
@@ -174,9 +174,9 @@ define { i64, i32 } @test_return_i3(i64 %i.coerce0, i32 %i.coerce1) {
 ; ALL: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
 ; ALL: [[GEP:%[0-9]+]]:_(p0) = G_GEP [[FRAME_INDEX2]], [[C1]](s64)
 ; ALL: G_STORE [[COPY1]](s32), [[GEP]](p0) :: (store 4 into %ir.1)
-; ALL: G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.memcpy), [[FRAME_INDEX1]](p0), [[FRAME_INDEX2]](p0), [[C]](s64) :: (store 1 into %ir.2, align 4), (load 1 from %ir.3, align 4)
-; ALL: G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.memcpy), [[FRAME_INDEX]](p0), [[FRAME_INDEX1]](p0), [[C]](s64) :: (store 1 into %ir.4, align 4), (load 1 from %ir.5, align 4)
-; ALL: G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.memcpy), [[FRAME_INDEX3]](p0), [[FRAME_INDEX]](p0), [[C]](s64) :: (store 1 into %ir.6, align 8), (load 1 from %ir.7, align 4)
+; ALL: G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.memcpy), [[FRAME_INDEX1]](p0), [[FRAME_INDEX2]](p0), [[C]](s64), 0 :: (store 1 into %ir.2, align 4), (load 1 from %ir.3, align 4)
+; ALL: G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.memcpy), [[FRAME_INDEX]](p0), [[FRAME_INDEX1]](p0), [[C]](s64), 0 :: (store 1 into %ir.4, align 4), (load 1 from %ir.5, align 4)
+; ALL: G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.memcpy), [[FRAME_INDEX3]](p0), [[FRAME_INDEX]](p0), [[C]](s64), 0 :: (store 1 into %ir.6, align 8), (load 1 from %ir.7, align 4)
 ; ALL: [[LOAD:%[0-9]+]]:_(s64) = G_LOAD [[FRAME_INDEX3]](p0) :: (load 8 from %ir.tmp)
 ; ALL: [[GEP1:%[0-9]+]]:_(p0) = G_GEP [[FRAME_INDEX3]], [[C1]](s64)
 ; ALL: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[GEP1]](p0) :: (load 4 from %ir.tmp + 8, align 8)
@@ -218,7 +218,7 @@ define { i64, i64 } @test_return_i4(i64 %i.coerce0, i64 %i.coerce1) {
 ; ALL: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
 ; ALL: [[GEP:%[0-9]+]]:_(p0) = G_GEP [[FRAME_INDEX1]], [[C1]](s64)
 ; ALL: G_STORE [[COPY1]](s64), [[GEP]](p0) :: (store 8 into %ir.2, align 4)
-; ALL: G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.memcpy), [[FRAME_INDEX]](p0), [[FRAME_INDEX1]](p0), [[C]](s64) :: (store 1 into %ir.3, align 4), (load 1 from %ir.4, align 4)
+; ALL: G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.memcpy), [[FRAME_INDEX]](p0), [[FRAME_INDEX1]](p0), [[C]](s64), 0 :: (store 1 into %ir.3, align 4), (load 1 from %ir.4, align 4)
 ; ALL: [[LOAD:%[0-9]+]]:_(s64) = G_LOAD [[FRAME_INDEX]](p0) :: (load 8 from %ir.5, align 4)
 ; ALL: [[GEP1:%[0-9]+]]:_(p0) = G_GEP [[FRAME_INDEX]], [[C1]](s64)
 ; ALL: [[LOAD1:%[0-9]+]]:_(s64) = G_LOAD [[GEP1]](p0) :: (load 8 from %ir.5 + 8, align 4)
llvm/test/MachineVerifier/test_memccpy_intrinsics.mir (new file, 27 lines)
@@ -0,0 +1,27 @@
+# RUN: not llc -o - -march=aarch64 -run-pass=none -verify-machineinstrs %s 2>&1 | FileCheck %s
+# REQUIRES: aarch64-registered-target
+
+---
+name: test_memcpy_et_al
+legalized: true
+regBankSelected: false
+selected: false
+tracksRegLiveness: true
+liveins:
+body: |
+  bb.0:
+
+    %0:_(p0) = G_IMPLICIT_DEF
+    %1:_(s64) = G_IMPLICIT_DEF
+    %2:_(s1) = G_IMPLICIT_DEF
+
+    ; CHECK: Bad machine code: Expected memcpy intrinsic to have 5 operands
+    G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.memcpy), %0(p0), %0(p0), %1(s64)
+
+    ; CHECK: Bad machine code: Expected memmove intrinsic to have 5 operands
+    G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.memmove), %0(p0), %0(p0), %1(s64)
+
+    ; CHECK: Bad machine code: Expected memset intrinsic to have 5 operands
+    G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.memset), %0(p0), %0(p0), %1(s64)
+
+...