hwasan: Move memory access checks into small outlined functions on aarch64.
Each hwasan check requires emitting a small piece of code like this:
https://clang.llvm.org/docs/HardwareAssistedAddressSanitizerDesign.html#memory-accesses

The problem with this is that these code blocks typically bloat code size
significantly.

An obvious solution is to outline these blocks of code. In fact, this has
already been implemented under the -hwasan-instrument-with-calls flag.
However, as currently implemented this has a number of problems:
- The functions use the same calling convention as regular C functions.
  This means that the backend must spill all temporary registers as
  required by the platform's C calling convention, even though the check
  only needs two registers on the hot path.
- The functions take the address to be checked in a fixed register, which
  increases register pressure.
Both of these factors can diminish the code size effect and increase the
performance hit of -hwasan-instrument-with-calls.

The solution that this patch implements is to involve the aarch64 backend
in outlining the checks. An intrinsic and pseudo-instruction are created
to represent a hwasan check. The pseudo-instruction is register allocated
like any other instruction, and we allow the register allocator to select
almost any register for the address to check. A particular combination of
(register selection, type of check) triggers the creation in the backend
of a function to handle the check for specifically that pair. The
resulting functions are deduplicated by the linker. The pseudo-instruction
(really the function) is specified to preserve all registers except for
the registers that the AAPCS specifies may be clobbered by a call.

To measure the code size and performance effect of this change, I took a
number of measurements using Chromium for Android on aarch64, comparing a
browser with inlined checks (the baseline) against a browser with
outlined checks.

Code size: Size of .text decreases from 243897420 to 171619972 bytes, or
a 30% decrease.

Performance: Using Chromium's blink_perf.layout microbenchmarks I
measured a median performance regression of 6.24%.

The fact that a perf/size tradeoff is evident here suggests that we might
want to make the new behaviour conditional on -Os/-Oz. But for now I've
enabled it unconditionally, my reasoning being that hwasan users typically
expect a relatively large perf hit, and ~6% isn't really adding much. We
may want to revisit this decision in the future, though.

I also tried experimenting with varying the number of registers selectable
by the hwasan check pseudo-instruction (which would result in fewer
variants being created), on the hypothesis that creating fewer variants of
the function would expose another perf/size tradeoff by reducing icache
pressure from the check functions at the cost of register pressure.
Although I did observe a code size increase with fewer registers, I did
not observe a strong correlation between the number of registers and the
performance of the resulting browser on the microbenchmarks, so I conclude
that we might as well use ~all registers to get the maximum code size
improvement. My results are below:

Regs | .text size | Perf hit
-----+------------+---------
~all |  171619972 |    6.24%
  16 |  171765192 |    7.03%
   8 |  172917788 |    5.82%
   4 |  177054016 |    6.89%

Differential Revision: https://reviews.llvm.org/D56954

llvm-svn: 351920
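For reference, the per-access check discussed above boils down to the following
logic. This is a minimal C++ sketch only, assuming 16-byte shadow granules and
the tag stored in the top pointer byte; the helper names are illustrative, and
the __hwasan_tag_mismatch signature is inferred from the register setup in the
outlined slow path below, not taken from the runtime's headers.

  #include <cstdint>

  extern "C" void __hwasan_tag_mismatch(uintptr_t Ptr, uint32_t AccessInfo);

  // Shadow base; the outlined __hwasan_check_* functions receive it in x9.
  extern uint8_t *ShadowBase;

  inline void checkAccess(uintptr_t Ptr, uint32_t AccessInfo) {
    uint8_t PtrTag = Ptr >> 56;                                   // tag is the top byte
    uint8_t MemTag = ShadowBase[(Ptr & ((1ULL << 56) - 1)) >> 4]; // one shadow byte per 16-byte granule
    if (PtrTag != MemTag)
      __hwasan_tag_mismatch(Ptr, AccessInfo);                     // slow path
  }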
This commit is contained in: parent aa6dcf0b5b · commit 2818e607ab
@@ -1048,6 +1048,9 @@ def int_icall_branch_funnel : Intrinsic<[], [llvm_vararg_ty], []>;
def int_load_relative: Intrinsic<[llvm_ptr_ty], [llvm_ptr_ty, llvm_anyint_ty],
                                 [IntrReadMem, IntrArgMemOnly]>;

def int_hwasan_check_memaccess :
  Intrinsic<[], [llvm_ptr_ty, llvm_ptr_ty, llvm_i32_ty], [IntrInaccessibleMemOnly]>;

// Xray intrinsics
//===----------------------------------------------------------------------===//
// Custom event logging for x-ray.
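As a usage sketch (not part of this patch), a pass emits the new intrinsic the
same way the HWAddressSanitizer change further down does; the helper name here
is made up, and the shadow-base and pointer values are assumed to be i8* values
already in hand:

  #include "llvm/IR/IRBuilder.h"
  #include "llvm/IR/Intrinsics.h"
  #include "llvm/IR/Module.h"

  using namespace llvm;

  // Emits: call void @llvm.hwasan.check.memaccess(i8* %shadow, i8* %ptr, i32 <accessinfo>)
  static void emitHwasanCheck(IRBuilder<> &IRB, Value *ShadowBase, Value *Ptr,
                              uint32_t AccessInfo) {
    Module *M = IRB.GetInsertBlock()->getParent()->getParent();
    IRB.CreateCall(
        Intrinsic::getDeclaration(M, Intrinsic::hwasan_check_memaccess),
        {ShadowBase, Ptr, IRB.getInt32(AccessInfo)});
  }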
@@ -28,6 +28,7 @@
#include "llvm/ADT/Triple.h"
#include "llvm/ADT/Twine.h"
#include "llvm/BinaryFormat/COFF.h"
#include "llvm/BinaryFormat/ELF.h"
#include "llvm/CodeGen/AsmPrinter.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFunction.h"
@@ -43,6 +44,7 @@
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCInst.h"
#include "llvm/MC/MCInstBuilder.h"
#include "llvm/MC/MCSectionELF.h"
#include "llvm/MC/MCStreamer.h"
#include "llvm/MC/MCSymbol.h"
#include "llvm/Support/Casting.h"
@@ -95,6 +97,10 @@ public:
  void LowerPATCHABLE_FUNCTION_EXIT(const MachineInstr &MI);
  void LowerPATCHABLE_TAIL_CALL(const MachineInstr &MI);

  std::map<std::pair<unsigned, uint32_t>, MCSymbol *> HwasanMemaccessSymbols;
  void LowerHWASAN_CHECK_MEMACCESS(const MachineInstr &MI);
  void EmitHwasanMemaccessSymbols(Module &M);

  void EmitSled(const MachineInstr &MI, SledKind Kind);

  /// tblgen'erated driver function for lowering simple MI->MC
@@ -229,7 +235,109 @@ void AArch64AsmPrinter::EmitSled(const MachineInstr &MI, SledKind Kind)
  recordSled(CurSled, MI, Kind);
}

void AArch64AsmPrinter::LowerHWASAN_CHECK_MEMACCESS(const MachineInstr &MI) {
  unsigned Reg = MI.getOperand(0).getReg();
  uint32_t AccessInfo = MI.getOperand(1).getImm();
  MCSymbol *&Sym = HwasanMemaccessSymbols[{Reg, AccessInfo}];
  if (!Sym) {
    // FIXME: Make this work on non-ELF.
    if (!TM.getTargetTriple().isOSBinFormatELF())
      report_fatal_error("llvm.hwasan.check.memaccess only supported on ELF");

    std::string SymName = "__hwasan_check_x" + utostr(Reg - AArch64::X0) + "_" +
                          utostr(AccessInfo);
    Sym = OutContext.getOrCreateSymbol(SymName);
  }

  EmitToStreamer(*OutStreamer,
                 MCInstBuilder(AArch64::BL)
                     .addExpr(MCSymbolRefExpr::create(Sym, OutContext)));
}

void AArch64AsmPrinter::EmitHwasanMemaccessSymbols(Module &M) {
  if (HwasanMemaccessSymbols.empty())
    return;

  const Triple &TT = TM.getTargetTriple();
  assert(TT.isOSBinFormatELF());
  std::unique_ptr<MCSubtargetInfo> STI(
      TM.getTarget().createMCSubtargetInfo(TT.str(), "", ""));

  MCSymbol *HwasanTagMismatchSym =
      OutContext.getOrCreateSymbol("__hwasan_tag_mismatch");

  for (auto &P : HwasanMemaccessSymbols) {
    unsigned Reg = P.first.first;
    uint32_t AccessInfo = P.first.second;
    MCSymbol *Sym = P.second;

    OutStreamer->SwitchSection(OutContext.getELFSection(
        ".text.hot", ELF::SHT_PROGBITS,
        ELF::SHF_EXECINSTR | ELF::SHF_ALLOC | ELF::SHF_GROUP, 0,
        Sym->getName()));

    OutStreamer->EmitSymbolAttribute(Sym, MCSA_ELF_TypeFunction);
    OutStreamer->EmitSymbolAttribute(Sym, MCSA_Weak);
    OutStreamer->EmitSymbolAttribute(Sym, MCSA_Hidden);
    OutStreamer->EmitLabel(Sym);

    OutStreamer->EmitInstruction(MCInstBuilder(AArch64::UBFMXri)
                                     .addReg(AArch64::X16)
                                     .addReg(Reg)
                                     .addImm(4)
                                     .addImm(55),
                                 *STI);
    OutStreamer->EmitInstruction(MCInstBuilder(AArch64::LDRBBroX)
                                     .addReg(AArch64::W16)
                                     .addReg(AArch64::X9)
                                     .addReg(AArch64::X16)
                                     .addImm(0)
                                     .addImm(0),
                                 *STI);
    OutStreamer->EmitInstruction(MCInstBuilder(AArch64::UBFMXri)
                                     .addReg(AArch64::X17)
                                     .addReg(Reg)
                                     .addImm(56)
                                     .addImm(63),
                                 *STI);
    OutStreamer->EmitInstruction(MCInstBuilder(AArch64::SUBSWrs)
                                     .addReg(AArch64::WZR)
                                     .addReg(AArch64::W16)
                                     .addReg(AArch64::W17)
                                     .addImm(0),
                                 *STI);
    MCSymbol *HandleMismatchSym = OutContext.createTempSymbol();
    OutStreamer->EmitInstruction(
        MCInstBuilder(AArch64::Bcc)
            .addImm(AArch64CC::NE)
            .addExpr(MCSymbolRefExpr::create(HandleMismatchSym, OutContext)),
        *STI);
    OutStreamer->EmitInstruction(
        MCInstBuilder(AArch64::RET).addReg(AArch64::LR), *STI);

    OutStreamer->EmitLabel(HandleMismatchSym);
    if (Reg != AArch64::X0)
      OutStreamer->EmitInstruction(MCInstBuilder(AArch64::ORRXrs)
                                       .addReg(AArch64::X0)
                                       .addReg(AArch64::XZR)
                                       .addReg(Reg)
                                       .addImm(0),
                                   *STI);
    OutStreamer->EmitInstruction(MCInstBuilder(AArch64::MOVZXi)
                                     .addReg(AArch64::X1)
                                     .addImm(AccessInfo)
                                     .addImm(0),
                                 *STI);
    OutStreamer->EmitInstruction(
        MCInstBuilder(AArch64::B)
            .addExpr(MCSymbolRefExpr::create(HwasanTagMismatchSym, OutContext)),
        *STI);
  }
}

void AArch64AsmPrinter::EmitEndOfAsmFile(Module &M) {
  EmitHwasanMemaccessSymbols(M);

  const Triple &TT = TM.getTargetTriple();
  if (TT.isOSBinFormatMachO()) {
    // Funny Darwin hack: This flag tells the linker that no global symbols
@@ -883,6 +991,10 @@ void AArch64AsmPrinter::EmitInstruction(const MachineInstr *MI) {
    LowerPATCHABLE_TAIL_CALL(*MI);
    return;

  case AArch64::HWASAN_CHECK_MEMACCESS:
    LowerHWASAN_CHECK_MEMACCESS(*MI);
    return;

  case AArch64::SEH_StackAlloc:
    TS->EmitARM64WinCFIAllocStack(MI->getOperand(0).getImm());
    return;
@@ -763,6 +763,13 @@ def MSRpstateImm4 : MSRpstateImm0_15;
def MOVbaseTLS : Pseudo<(outs GPR64:$dst), (ins),
                        [(set GPR64:$dst, AArch64threadpointer)]>, Sched<[WriteSys]>;

let Uses = [ X9 ], Defs = [ X16, X17, LR, NZCV ] in {
def HWASAN_CHECK_MEMACCESS : Pseudo<
  (outs), (ins GPR64noip:$ptr, i32imm:$accessinfo),
  [(int_hwasan_check_memaccess X9, GPR64noip:$ptr, (i32 imm:$accessinfo))]>,
  Sched<[]>;
}

// The cycle counter PMC register is PMCCNTR_EL0.
let Predicates = [HasPerfMon] in
def : Pat<(readcyclecounter), (MRS 0xdce8)>;
@@ -248,6 +248,9 @@ const RegisterBank &AArch64RegisterBankInfo::getRegBankFromRegClass(
  case AArch64::GPR64spRegClassID:
  case AArch64::GPR64sponlyRegClassID:
  case AArch64::GPR64allRegClassID:
  case AArch64::GPR64noipRegClassID:
  case AArch64::GPR64common_and_GPR64noipRegClassID:
  case AArch64::GPR64noip_and_tcGPR64RegClassID:
  case AArch64::tcGPR64RegClassID:
  case AArch64::WSeqPairsClassRegClassID:
  case AArch64::XSeqPairsClassRegClassID:
@@ -205,6 +205,11 @@ def tcGPR64 : RegisterClass<"AArch64", [i64], 64, (sub GPR64common, X19, X20, X2
// BTI-protected function.
def rtcGPR64 : RegisterClass<"AArch64", [i64], 64, (add X16, X17)>;

// Register set that excludes registers that are reserved for procedure calls.
// This is used for pseudo-instructions that are actually implemented using a
// procedure call.
def GPR64noip : RegisterClass<"AArch64", [i64], 64, (sub GPR64, X16, X17, LR)>;

// GPR register classes for post increment amount of vector load/store that
// has alternate printing when Rm=31 and prints a constant immediate value
// equal to the total number of bytes transferred.
@@ -155,6 +155,11 @@ static cl::opt<bool>
    ClInstrumentMemIntrinsics("hwasan-instrument-mem-intrinsics",
                              cl::desc("instrument memory intrinsics"),
                              cl::Hidden, cl::init(true));

static cl::opt<bool> ClInlineAllChecks("hwasan-inline-all-checks",
                                       cl::desc("inline all checks"),
                                       cl::Hidden, cl::init(false));

namespace {

/// An instrumentation pass implementing detection of addressability bugs
@@ -181,8 +186,9 @@ public:
  Value *getDynamicShadowNonTls(IRBuilder<> &IRB);

  void untagPointerOperand(Instruction *I, Value *Addr);
  Value *memToShadow(Value *Shadow, Type *Ty, IRBuilder<> &IRB);
  void instrumentMemAccessInline(Value *PtrLong, bool IsWrite,
  Value *shadowBase();
  Value *memToShadow(Value *Shadow, IRBuilder<> &IRB);
  void instrumentMemAccessInline(Value *Ptr, bool IsWrite,
                                 unsigned AccessSizeIndex,
                                 Instruction *InsertBefore);
  void instrumentMemIntrinsic(MemIntrinsic *MI);
@@ -252,6 +258,7 @@ private:
  Type *IntptrTy;
  Type *Int8PtrTy;
  Type *Int8Ty;
  Type *Int32Ty;

  bool CompileKernel;
  bool Recover;
@@ -307,6 +314,7 @@ bool HWAddressSanitizer::doInitialization(Module &M) {
  IntptrTy = IRB.getIntPtrTy(DL);
  Int8PtrTy = IRB.getInt8PtrTy();
  Int8Ty = IRB.getInt8Ty();
  Int32Ty = IRB.getInt32Ty();

  HwasanCtorFunction = nullptr;
  if (!CompileKernel) {
@@ -403,16 +411,16 @@ Value *HWAddressSanitizer::getDynamicShadowNonTls(IRBuilder<> &IRB) {

  if (Mapping.InGlobal) {
    // An empty inline asm with input reg == output reg.
    // An opaque pointer-to-int cast, basically.
    // An opaque no-op cast, basically.
    InlineAsm *Asm = InlineAsm::get(
        FunctionType::get(IntptrTy, {ShadowGlobal->getType()}, false),
        FunctionType::get(Int8PtrTy, {ShadowGlobal->getType()}, false),
        StringRef(""), StringRef("=r,0"),
        /*hasSideEffects=*/false);
    return IRB.CreateCall(Asm, {ShadowGlobal}, ".hwasan.shadow");
  } else {
    Value *GlobalDynamicAddress =
        IRB.GetInsertBlock()->getParent()->getParent()->getOrInsertGlobal(
            kHwasanShadowMemoryDynamicAddress, IntptrTy);
            kHwasanShadowMemoryDynamicAddress, Int8PtrTy);
    return IRB.CreateLoad(GlobalDynamicAddress);
  }
}
@@ -505,29 +513,44 @@ void HWAddressSanitizer::untagPointerOperand(Instruction *I, Value *Addr) {
  I->setOperand(getPointerOperandIndex(I), UntaggedPtr);
}

Value *HWAddressSanitizer::memToShadow(Value *Mem, Type *Ty, IRBuilder<> &IRB) {
Value *HWAddressSanitizer::shadowBase() {
  if (LocalDynamicShadow)
    return LocalDynamicShadow;
  return ConstantExpr::getIntToPtr(ConstantInt::get(IntptrTy, Mapping.Offset),
                                   Int8PtrTy);
}

Value *HWAddressSanitizer::memToShadow(Value *Mem, IRBuilder<> &IRB) {
  // Mem >> Scale
  Value *Shadow = IRB.CreateLShr(Mem, Mapping.Scale);
  if (Mapping.Offset == 0)
    return Shadow;
    return IRB.CreateIntToPtr(Shadow, Int8PtrTy);
  // (Mem >> Scale) + Offset
  Value *ShadowBase;
  if (LocalDynamicShadow)
    ShadowBase = LocalDynamicShadow;
  else
    ShadowBase = ConstantInt::get(Ty, Mapping.Offset);
  return IRB.CreateAdd(Shadow, ShadowBase);
  return IRB.CreateGEP(Int8Ty, shadowBase(), Shadow);
}

void HWAddressSanitizer::instrumentMemAccessInline(Value *PtrLong, bool IsWrite,
void HWAddressSanitizer::instrumentMemAccessInline(Value *Ptr, bool IsWrite,
                                                   unsigned AccessSizeIndex,
                                                   Instruction *InsertBefore) {
  const int64_t AccessInfo = Recover * 0x20 + IsWrite * 0x10 + AccessSizeIndex;
  IRBuilder<> IRB(InsertBefore);

  if (!ClInlineAllChecks && TargetTriple.isAArch64() &&
      TargetTriple.isOSBinFormatELF() && !Recover) {
    Module *M = IRB.GetInsertBlock()->getParent()->getParent();
    Ptr = IRB.CreateBitCast(Ptr, Int8PtrTy);
    IRB.CreateCall(
        Intrinsic::getDeclaration(M, Intrinsic::hwasan_check_memaccess),
        {shadowBase(), Ptr, ConstantInt::get(Int32Ty, AccessInfo)});
    return;
  }

  Value *PtrLong = IRB.CreatePointerCast(Ptr, IntptrTy);
  Value *PtrTag = IRB.CreateTrunc(IRB.CreateLShr(PtrLong, kPointerTagShift),
                                  IRB.getInt8Ty());
  Value *AddrLong = untagPointer(IRB, PtrLong);
  Value *ShadowLong = memToShadow(AddrLong, PtrLong->getType(), IRB);
  Value *MemTag = IRB.CreateLoad(IRB.CreateIntToPtr(ShadowLong, Int8PtrTy));
  Value *Shadow = memToShadow(AddrLong, IRB);
  Value *MemTag = IRB.CreateLoad(Shadow);
  Value *TagMismatch = IRB.CreateICmpNE(PtrTag, MemTag);

  int matchAllTag = ClMatchAllTag.getNumOccurrences() > 0 ?
@@ -543,7 +566,6 @@ void HWAddressSanitizer::instrumentMemAccessInline(Value *PtrLong, bool IsWrite,
                  MDBuilder(*C).createBranchWeights(1, 100000));

  IRB.SetInsertPoint(CheckTerm);
  const int64_t AccessInfo = Recover * 0x20 + IsWrite * 0x10 + AccessSizeIndex;
  InlineAsm *Asm;
  switch (TargetTriple.getArch()) {
  case Triple::x86_64:
@@ -609,7 +631,6 @@ bool HWAddressSanitizer::instrumentMemAccess(Instruction *I) {
    return false; //FIXME

  IRBuilder<> IRB(I);
  Value *AddrLong = IRB.CreatePointerCast(Addr, IntptrTy);
  if (isPowerOf2_64(TypeSize) &&
      (TypeSize / 8 <= (1UL << (kNumberOfAccessSizes - 1))) &&
      (Alignment >= (1UL << Mapping.Scale) || Alignment == 0 ||
@@ -617,13 +638,14 @@ bool HWAddressSanitizer::instrumentMemAccess(Instruction *I) {
    size_t AccessSizeIndex = TypeSizeToSizeIndex(TypeSize);
    if (ClInstrumentWithCalls) {
      IRB.CreateCall(HwasanMemoryAccessCallback[IsWrite][AccessSizeIndex],
                     AddrLong);
                     IRB.CreatePointerCast(Addr, IntptrTy));
    } else {
      instrumentMemAccessInline(AddrLong, IsWrite, AccessSizeIndex, I);
      instrumentMemAccessInline(Addr, IsWrite, AccessSizeIndex, I);
    }
  } else {
    IRB.CreateCall(HwasanMemoryAccessCallbackSized[IsWrite],
                   {AddrLong, ConstantInt::get(IntptrTy, TypeSize / 8)});
                   {IRB.CreatePointerCast(Addr, IntptrTy),
                    ConstantInt::get(IntptrTy, TypeSize / 8)});
  }
  untagPointerOperand(I, Addr);

@@ -654,9 +676,7 @@ bool HWAddressSanitizer::tagAlloca(IRBuilder<> &IRB, AllocaInst *AI,
                    ConstantInt::get(IntptrTy, Size)});
  } else {
    size_t ShadowSize = Size >> Mapping.Scale;
    Value *ShadowPtr = IRB.CreateIntToPtr(
        memToShadow(IRB.CreatePointerCast(AI, IntptrTy), AI->getType(), IRB),
        Int8PtrTy);
    Value *ShadowPtr = memToShadow(IRB.CreatePointerCast(AI, IntptrTy), IRB);
    // If this memset is not inlined, it will be intercepted in the hwasan
    // runtime library. That's OK, because the interceptor skips the checks if
    // the address is in the shadow region.
@@ -883,6 +903,7 @@ Value *HWAddressSanitizer::emitPrologue(IRBuilder<> &IRB,
          ThreadLongMaybeUntagged,
          ConstantInt::get(IntptrTy, (1ULL << kShadowBaseAlignment) - 1)),
      ConstantInt::get(IntptrTy, 1), "hwasan.shadow");
  ShadowBase = IRB.CreateIntToPtr(ShadowBase, Int8PtrTy);
  return ShadowBase;
}

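The i32 access-info constants that appear in the tests below follow the
encoding used by instrumentMemAccessInline above (Recover * 0x20 +
IsWrite * 0x10 + AccessSizeIndex); a small sketch with an illustrative
helper name:

  #include <cstdint>

  // Bit 5: recover mode, bit 4: write access, low bits: log2 of the access size.
  constexpr uint32_t encodeAccessInfo(bool Recover, bool IsWrite,
                                      unsigned AccessSizeIndex) {
    return Recover * 0x20 + IsWrite * 0x10 + AccessSizeIndex;
  }

  static_assert(encodeAccessInfo(false, false, 2) == 2, "4-byte load");
  static_assert(encodeAccessInfo(false, true, 3) == 19, "8-byte store");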
test/CodeGen/AArch64/hwasan-check-memaccess.ll (new file, 63 lines)
@@ -0,0 +1,63 @@
; RUN: llc < %s | FileCheck %s

target triple = "aarch64--linux-android"

define i8* @f1(i8* %x0, i8* %x1) {
  ; CHECK: f1:
  ; CHECK: str x30, [sp, #-16]!
  ; CHECK-NEXT: .cfi_def_cfa_offset 16
  ; CHECK-NEXT: .cfi_offset w30, -16
  ; CHECK-NEXT: mov x9, x0
  ; CHECK-NEXT: bl __hwasan_check_x1_123
  ; CHECK-NEXT: mov x0, x1
  ; CHECK-NEXT: ldr x30, [sp], #16
  ; CHECK-NEXT: ret
  call void @llvm.hwasan.check.memaccess(i8* %x0, i8* %x1, i32 123)
  ret i8* %x1
}

define i8* @f2(i8* %x0, i8* %x1) {
  ; CHECK: f2:
  ; CHECK: str x30, [sp, #-16]!
  ; CHECK-NEXT: .cfi_def_cfa_offset 16
  ; CHECK-NEXT: .cfi_offset w30, -16
  ; CHECK-NEXT: mov x9, x1
  ; CHECK-NEXT: bl __hwasan_check_x0_456
  ; CHECK-NEXT: ldr x30, [sp], #16
  ; CHECK-NEXT: ret
  call void @llvm.hwasan.check.memaccess(i8* %x1, i8* %x0, i32 456)
  ret i8* %x0
}

declare void @llvm.hwasan.check.memaccess(i8*, i8*, i32)

; CHECK: .section .text.hot,"axG",@progbits,__hwasan_check_x0_456,comdat
; CHECK-NEXT: .type __hwasan_check_x0_456,@function
; CHECK-NEXT: .weak __hwasan_check_x0_456
; CHECK-NEXT: .hidden __hwasan_check_x0_456
; CHECK-NEXT: __hwasan_check_x0_456:
; CHECK-NEXT: ubfx x16, x0, #4, #52
; CHECK-NEXT: ldrb w16, [x9, x16]
; CHECK-NEXT: lsr x17, x0, #56
; CHECK-NEXT: cmp w16, w17
; CHECK-NEXT: b.ne .Ltmp0
; CHECK-NEXT: ret
; CHECK-NEXT: .Ltmp0:
; CHECK-NEXT: mov x1, #456
; CHECK-NEXT: b __hwasan_tag_mismatch

; CHECK: .section .text.hot,"axG",@progbits,__hwasan_check_x1_123,comdat
; CHECK-NEXT: .type __hwasan_check_x1_123,@function
; CHECK-NEXT: .weak __hwasan_check_x1_123
; CHECK-NEXT: .hidden __hwasan_check_x1_123
; CHECK-NEXT: __hwasan_check_x1_123:
; CHECK-NEXT: ubfx x16, x1, #4, #52
; CHECK-NEXT: ldrb w16, [x9, x16]
; CHECK-NEXT: lsr x17, x1, #56
; CHECK-NEXT: cmp w16, w17
; CHECK-NEXT: b.ne .Ltmp1
; CHECK-NEXT: ret
; CHECK-NEXT: .Ltmp1:
; CHECK-NEXT: mov x0, x1
; CHECK-NEXT: mov x1, #123
; CHECK-NEXT: b __hwasan_tag_mismatch
@ -26,8 +26,7 @@ define void @test_alloca() sanitize_hwaddress {
|
||||
; CHECK: %[[X_TAG2:[^ ]*]] = trunc i64 %[[X_TAG]] to i8
|
||||
; CHECK: %[[E:[^ ]*]] = ptrtoint i32* %[[X]] to i64
|
||||
; CHECK: %[[F:[^ ]*]] = lshr i64 %[[E]], 4
|
||||
; DYNAMIC-SHADOW: %[[F_DYN:[^ ]*]] = add i64 %[[F]], %.hwasan.shadow
|
||||
; DYNAMIC-SHADOW: %[[X_SHADOW:[^ ]*]] = inttoptr i64 %[[F_DYN]] to i8*
|
||||
; DYNAMIC-SHADOW: %[[X_SHADOW:[^ ]*]] = getelementptr i8, i8* %.hwasan.shadow, i64 %[[F]]
|
||||
; ZERO-BASED-SHADOW: %[[X_SHADOW:[^ ]*]] = inttoptr i64 %[[F]] to i8*
|
||||
; CHECK: call void @llvm.memset.p0i8.i64(i8* align 1 %[[X_SHADOW]], i8 %[[X_TAG2]], i64 1, i1 false)
|
||||
; CHECK: call void @use32(i32* nonnull %[[X_HWASAN]])
|
||||
@ -36,8 +35,7 @@ define void @test_alloca() sanitize_hwaddress {
|
||||
; UAR-TAGS: %[[X_TAG_UAR:[^ ]*]] = trunc i64 %[[BASE_TAG_COMPL]] to i8
|
||||
; CHECK: %[[E2:[^ ]*]] = ptrtoint i32* %[[X]] to i64
|
||||
; CHECK: %[[F2:[^ ]*]] = lshr i64 %[[E2]], 4
|
||||
; DYNAMIC-SHADOW: %[[F2_DYN:[^ ]*]] = add i64 %[[F2]], %.hwasan.shadow
|
||||
; DYNAMIC-SHADOW: %[[X_SHADOW2:[^ ]*]] = inttoptr i64 %[[F2_DYN]] to i8*
|
||||
; DYNAMIC-SHADOW: %[[X_SHADOW2:[^ ]*]] = getelementptr i8, i8* %.hwasan.shadow, i64 %[[F2]]
|
||||
; ZERO-BASED-SHADOW: %[[X_SHADOW2:[^ ]*]] = inttoptr i64 %[[F2]] to i8*
|
||||
; NO-UAR-TAGS: call void @llvm.memset.p0i8.i64(i8* align 1 %[[X_SHADOW2]], i8 0, i64 1, i1 false)
|
||||
; UAR-TAGS: call void @llvm.memset.p0i8.i64(i8* align 1 %[[X_SHADOW2]], i8 %[[X_TAG_UAR]], i64 1, i1 false)
|
||||
|
@ -7,8 +7,8 @@ target triple = "aarch64--linux-android"
|
||||
|
||||
define void @atomicrmw(i64* %ptr) sanitize_hwaddress {
|
||||
; CHECK-LABEL: @atomicrmw(
|
||||
; CHECK: lshr i64 %[[A:[^ ]*]], 56
|
||||
; CHECK: call void asm sideeffect "brk #2323", "{x0}"(i64 %[[A]])
|
||||
; CHECK: [[PTRI8:%[^ ]*]] = bitcast i64* %ptr to i8*
|
||||
; CHECK: call void @llvm.hwasan.check.memaccess({{.*}}, i8* [[PTRI8]], i32 19)
|
||||
; CHECK: atomicrmw add i64* %ptr, i64 1 seq_cst
|
||||
; CHECK: ret void
|
||||
|
||||
@ -19,8 +19,8 @@ entry:
|
||||
|
||||
define void @cmpxchg(i64* %ptr, i64 %compare_to, i64 %new_value) sanitize_hwaddress {
|
||||
; CHECK-LABEL: @cmpxchg(
|
||||
; CHECK: lshr i64 %[[A:[^ ]*]], 56
|
||||
; CHECK: call void asm sideeffect "brk #2323", "{x0}"(i64 %[[A]])
|
||||
; CHECK: [[PTRI8:%[^ ]*]] = bitcast i64* %ptr to i8*
|
||||
; CHECK: call void @llvm.hwasan.check.memaccess({{.*}}, i8* [[PTRI8]], i32 19)
|
||||
; CHECK: cmpxchg i64* %ptr, i64 %compare_to, i64 %new_value seq_cst seq_cst
|
||||
; CHECK: ret void
|
||||
|
||||
|
@ -1,9 +1,9 @@
|
||||
; Test basic address sanitizer instrumentation.
|
||||
;
|
||||
; RUN: opt < %s -hwasan -hwasan-recover=0 -hwasan-with-ifunc=1 -hwasan-with-tls=0 -S | FileCheck %s --check-prefixes=CHECK,ABORT,DYNAMIC-SHADOW
|
||||
; RUN: opt < %s -hwasan -hwasan-recover=1 -hwasan-with-ifunc=1 -hwasan-with-tls=0 -S | FileCheck %s --check-prefixes=CHECK,RECOVER,DYNAMIC-SHADOW
|
||||
; RUN: opt < %s -hwasan -hwasan-recover=0 -hwasan-mapping-offset=0 -S | FileCheck %s --check-prefixes=CHECK,ABORT,ZERO-BASED-SHADOW
|
||||
; RUN: opt < %s -hwasan -hwasan-recover=1 -hwasan-mapping-offset=0 -S | FileCheck %s --check-prefixes=CHECK,RECOVER,ZERO-BASED-SHADOW
|
||||
; RUN: opt < %s -hwasan -hwasan-recover=0 -hwasan-with-ifunc=1 -hwasan-with-tls=0 -S | FileCheck %s --check-prefixes=CHECK,ABORT,ABORT-DYNAMIC-SHADOW
|
||||
; RUN: opt < %s -hwasan -hwasan-recover=1 -hwasan-with-ifunc=1 -hwasan-with-tls=0 -S | FileCheck %s --check-prefixes=CHECK,RECOVER,RECOVER-DYNAMIC-SHADOW
|
||||
; RUN: opt < %s -hwasan -hwasan-recover=0 -hwasan-mapping-offset=0 -S | FileCheck %s --check-prefixes=CHECK,ABORT,ABORT-ZERO-BASED-SHADOW
|
||||
; RUN: opt < %s -hwasan -hwasan-recover=1 -hwasan-mapping-offset=0 -S | FileCheck %s --check-prefixes=CHECK,RECOVER,RECOVER-ZERO-BASED-SHADOW
|
||||
|
||||
; CHECK: @llvm.global_ctors = appending global [1 x { i32, void ()*, i8* }] [{ i32, void ()*, i8* } { i32 0, void ()* @hwasan.module_ctor, i8* bitcast (void ()* @hwasan.module_ctor to i8*) }]
|
||||
; CHECK: @__hwasan = private constant [0 x i8] zeroinitializer, section "__hwasan_frames", comdat($hwasan.module_ctor)
|
||||
@ -13,23 +13,23 @@ target triple = "aarch64--linux-android"
|
||||
|
||||
define i8 @test_load8(i8* %a) sanitize_hwaddress {
|
||||
; CHECK-LABEL: @test_load8(
|
||||
; CHECK: %[[A:[^ ]*]] = ptrtoint i8* %a to i64
|
||||
; CHECK: %[[B:[^ ]*]] = lshr i64 %[[A]], 56
|
||||
; CHECK: %[[PTRTAG:[^ ]*]] = trunc i64 %[[B]] to i8
|
||||
; CHECK: %[[C:[^ ]*]] = and i64 %[[A]], 72057594037927935
|
||||
; CHECK: %[[D:[^ ]*]] = lshr i64 %[[C]], 4
|
||||
; DYNAMIC-SHADOW: %[[D_DYN:[^ ]*]] = add i64 %[[D]], %.hwasan.shadow
|
||||
; DYNAMIC-SHADOW: %[[E:[^ ]*]] = inttoptr i64 %[[D_DYN]] to i8*
|
||||
; ZERO-BASED-SHADOW: %[[E:[^ ]*]] = inttoptr i64 %[[D]] to i8*
|
||||
; CHECK: %[[MEMTAG:[^ ]*]] = load i8, i8* %[[E]]
|
||||
; CHECK: %[[F:[^ ]*]] = icmp ne i8 %[[PTRTAG]], %[[MEMTAG]]
|
||||
; CHECK: br i1 %[[F]], label {{.*}}, label {{.*}}, !prof {{.*}}
|
||||
; RECOVER: %[[A:[^ ]*]] = ptrtoint i8* %a to i64
|
||||
; RECOVER: %[[B:[^ ]*]] = lshr i64 %[[A]], 56
|
||||
; RECOVER: %[[PTRTAG:[^ ]*]] = trunc i64 %[[B]] to i8
|
||||
; RECOVER: %[[C:[^ ]*]] = and i64 %[[A]], 72057594037927935
|
||||
; RECOVER: %[[D:[^ ]*]] = lshr i64 %[[C]], 4
|
||||
; RECOVER-DYNAMIC-SHADOW: %[[E:[^ ]*]] = getelementptr i8, i8* %.hwasan.shadow, i64 %4
|
||||
; RECOVER-ZERO-BASED-SHADOW: %[[E:[^ ]*]] = inttoptr i64 %[[D]] to i8*
|
||||
; RECOVER: %[[MEMTAG:[^ ]*]] = load i8, i8* %[[E]]
|
||||
; RECOVER: %[[F:[^ ]*]] = icmp ne i8 %[[PTRTAG]], %[[MEMTAG]]
|
||||
; RECOVER: br i1 %[[F]], label {{.*}}, label {{.*}}, !prof {{.*}}
|
||||
|
||||
; ABORT: call void asm sideeffect "brk #2304", "{x0}"(i64 %[[A]])
|
||||
; ABORT: unreachable
|
||||
; RECOVER: call void asm sideeffect "brk #2336", "{x0}"(i64 %[[A]])
|
||||
; RECOVER: br label
|
||||
|
||||
; ABORT-DYNAMIC-SHADOW: call void @llvm.hwasan.check.memaccess(i8* %.hwasan.shadow, i8* %a, i32 0)
|
||||
; ABORT-ZERO-BASED-SHADOW: call void @llvm.hwasan.check.memaccess(i8* null, i8* %a, i32 0)
|
||||
|
||||
; CHECK: %[[G:[^ ]*]] = load i8, i8* %a, align 4
|
||||
; CHECK: ret i8 %[[G]]
|
||||
|
||||
@ -40,23 +40,24 @@ entry:
|
||||
|
||||
define i16 @test_load16(i16* %a) sanitize_hwaddress {
|
||||
; CHECK-LABEL: @test_load16(
|
||||
; CHECK: %[[A:[^ ]*]] = ptrtoint i16* %a to i64
|
||||
; CHECK: %[[B:[^ ]*]] = lshr i64 %[[A]], 56
|
||||
; CHECK: %[[PTRTAG:[^ ]*]] = trunc i64 %[[B]] to i8
|
||||
; CHECK: %[[C:[^ ]*]] = and i64 %[[A]], 72057594037927935
|
||||
; CHECK: %[[D:[^ ]*]] = lshr i64 %[[C]], 4
|
||||
; DYNAMIC-SHADOW: %[[D_DYN:[^ ]*]] = add i64 %[[D]], %.hwasan.shadow
|
||||
; DYNAMIC-SHADOW: %[[E:[^ ]*]] = inttoptr i64 %[[D_DYN]] to i8*
|
||||
; ZERO-BASED-SHADOW: %[[E:[^ ]*]] = inttoptr i64 %[[D]] to i8*
|
||||
; CHECK: %[[MEMTAG:[^ ]*]] = load i8, i8* %[[E]]
|
||||
; CHECK: %[[F:[^ ]*]] = icmp ne i8 %[[PTRTAG]], %[[MEMTAG]]
|
||||
; CHECK: br i1 %[[F]], label {{.*}}, label {{.*}}, !prof {{.*}}
|
||||
; RECOVER: %[[A:[^ ]*]] = ptrtoint i16* %a to i64
|
||||
; RECOVER: %[[B:[^ ]*]] = lshr i64 %[[A]], 56
|
||||
; RECOVER: %[[PTRTAG:[^ ]*]] = trunc i64 %[[B]] to i8
|
||||
; RECOVER: %[[C:[^ ]*]] = and i64 %[[A]], 72057594037927935
|
||||
; RECOVER: %[[D:[^ ]*]] = lshr i64 %[[C]], 4
|
||||
; RECOVER-DYNAMIC-SHADOW: %[[E:[^ ]*]] = getelementptr i8, i8* %.hwasan.shadow, i64 %4
|
||||
; RECOVER-ZERO-BASED-SHADOW: %[[E:[^ ]*]] = inttoptr i64 %[[D]] to i8*
|
||||
; RECOVER: %[[MEMTAG:[^ ]*]] = load i8, i8* %[[E]]
|
||||
; RECOVER: %[[F:[^ ]*]] = icmp ne i8 %[[PTRTAG]], %[[MEMTAG]]
|
||||
; RECOVER: br i1 %[[F]], label {{.*}}, label {{.*}}, !prof {{.*}}
|
||||
|
||||
; ABORT: call void asm sideeffect "brk #2305", "{x0}"(i64 %[[A]])
|
||||
; ABORT: unreachable
|
||||
; RECOVER: call void asm sideeffect "brk #2337", "{x0}"(i64 %[[A]])
|
||||
; RECOVER: br label
|
||||
|
||||
; ABORT: %[[A:[^ ]*]] = bitcast i16* %a to i8*
|
||||
; ABORT-DYNAMIC-SHADOW: call void @llvm.hwasan.check.memaccess(i8* %.hwasan.shadow, i8* %[[A]], i32 1)
|
||||
; ABORT-ZERO-BASED-SHADOW: call void @llvm.hwasan.check.memaccess(i8* null, i8* %[[A]], i32 1)
|
||||
|
||||
; CHECK: %[[G:[^ ]*]] = load i16, i16* %a, align 4
|
||||
; CHECK: ret i16 %[[G]]
|
||||
|
||||
@ -67,23 +68,24 @@ entry:
|
||||
|
||||
define i32 @test_load32(i32* %a) sanitize_hwaddress {
|
||||
; CHECK-LABEL: @test_load32(
|
||||
; CHECK: %[[A:[^ ]*]] = ptrtoint i32* %a to i64
|
||||
; CHECK: %[[B:[^ ]*]] = lshr i64 %[[A]], 56
|
||||
; CHECK: %[[PTRTAG:[^ ]*]] = trunc i64 %[[B]] to i8
|
||||
; CHECK: %[[C:[^ ]*]] = and i64 %[[A]], 72057594037927935
|
||||
; CHECK: %[[D:[^ ]*]] = lshr i64 %[[C]], 4
|
||||
; DYNAMIC-SHADOW: %[[D_DYN:[^ ]*]] = add i64 %[[D]], %.hwasan.shadow
|
||||
; DYNAMIC-SHADOW: %[[E:[^ ]*]] = inttoptr i64 %[[D_DYN]] to i8*
|
||||
; ZERO-BASED-SHADOW: %[[E:[^ ]*]] = inttoptr i64 %[[D]] to i8*
|
||||
; CHECK: %[[MEMTAG:[^ ]*]] = load i8, i8* %[[E]]
|
||||
; CHECK: %[[F:[^ ]*]] = icmp ne i8 %[[PTRTAG]], %[[MEMTAG]]
|
||||
; CHECK: br i1 %[[F]], label {{.*}}, label {{.*}}, !prof {{.*}}
|
||||
; RECOVER: %[[A:[^ ]*]] = ptrtoint i32* %a to i64
|
||||
; RECOVER: %[[B:[^ ]*]] = lshr i64 %[[A]], 56
|
||||
; RECOVER: %[[PTRTAG:[^ ]*]] = trunc i64 %[[B]] to i8
|
||||
; RECOVER: %[[C:[^ ]*]] = and i64 %[[A]], 72057594037927935
|
||||
; RECOVER: %[[D:[^ ]*]] = lshr i64 %[[C]], 4
|
||||
; RECOVER-DYNAMIC-SHADOW: %[[E:[^ ]*]] = getelementptr i8, i8* %.hwasan.shadow, i64 %4
|
||||
; RECOVER-ZERO-BASED-SHADOW: %[[E:[^ ]*]] = inttoptr i64 %[[D]] to i8*
|
||||
; RECOVER: %[[MEMTAG:[^ ]*]] = load i8, i8* %[[E]]
|
||||
; RECOVER: %[[F:[^ ]*]] = icmp ne i8 %[[PTRTAG]], %[[MEMTAG]]
|
||||
; RECOVER: br i1 %[[F]], label {{.*}}, label {{.*}}, !prof {{.*}}
|
||||
|
||||
; ABORT: call void asm sideeffect "brk #2306", "{x0}"(i64 %[[A]])
|
||||
; ABORT: unreachable
|
||||
; RECOVER: call void asm sideeffect "brk #2338", "{x0}"(i64 %[[A]])
|
||||
; RECOVER: br label
|
||||
|
||||
; ABORT: %[[A:[^ ]*]] = bitcast i32* %a to i8*
|
||||
; ABORT-DYNAMIC-SHADOW: call void @llvm.hwasan.check.memaccess(i8* %.hwasan.shadow, i8* %[[A]], i32 2)
|
||||
; ABORT-ZERO-BASED-SHADOW: call void @llvm.hwasan.check.memaccess(i8* null, i8* %[[A]], i32 2)
|
||||
|
||||
; CHECK: %[[G:[^ ]*]] = load i32, i32* %a, align 4
|
||||
; CHECK: ret i32 %[[G]]
|
||||
|
||||
@ -94,23 +96,24 @@ entry:
|
||||
|
||||
define i64 @test_load64(i64* %a) sanitize_hwaddress {
|
||||
; CHECK-LABEL: @test_load64(
|
||||
; CHECK: %[[A:[^ ]*]] = ptrtoint i64* %a to i64
|
||||
; CHECK: %[[B:[^ ]*]] = lshr i64 %[[A]], 56
|
||||
; CHECK: %[[PTRTAG:[^ ]*]] = trunc i64 %[[B]] to i8
|
||||
; CHECK: %[[C:[^ ]*]] = and i64 %[[A]], 72057594037927935
|
||||
; CHECK: %[[D:[^ ]*]] = lshr i64 %[[C]], 4
|
||||
; DYNAMIC-SHADOW: %[[D_DYN:[^ ]*]] = add i64 %[[D]], %.hwasan.shadow
|
||||
; DYNAMIC-SHADOW: %[[E:[^ ]*]] = inttoptr i64 %[[D_DYN]] to i8*
|
||||
; ZERO-BASED-SHADOW: %[[E:[^ ]*]] = inttoptr i64 %[[D]] to i8*
|
||||
; CHECK: %[[MEMTAG:[^ ]*]] = load i8, i8* %[[E]]
|
||||
; CHECK: %[[F:[^ ]*]] = icmp ne i8 %[[PTRTAG]], %[[MEMTAG]]
|
||||
; CHECK: br i1 %[[F]], label {{.*}}, label {{.*}}, !prof {{.*}}
|
||||
; RECOVER: %[[A:[^ ]*]] = ptrtoint i64* %a to i64
|
||||
; RECOVER: %[[B:[^ ]*]] = lshr i64 %[[A]], 56
|
||||
; RECOVER: %[[PTRTAG:[^ ]*]] = trunc i64 %[[B]] to i8
|
||||
; RECOVER: %[[C:[^ ]*]] = and i64 %[[A]], 72057594037927935
|
||||
; RECOVER: %[[D:[^ ]*]] = lshr i64 %[[C]], 4
|
||||
; RECOVER-DYNAMIC-SHADOW: %[[E:[^ ]*]] = getelementptr i8, i8* %.hwasan.shadow, i64 %4
|
||||
; RECOVER-ZERO-BASED-SHADOW: %[[E:[^ ]*]] = inttoptr i64 %[[D]] to i8*
|
||||
; RECOVER: %[[MEMTAG:[^ ]*]] = load i8, i8* %[[E]]
|
||||
; RECOVER: %[[F:[^ ]*]] = icmp ne i8 %[[PTRTAG]], %[[MEMTAG]]
|
||||
; RECOVER: br i1 %[[F]], label {{.*}}, label {{.*}}, !prof {{.*}}
|
||||
|
||||
; ABORT: call void asm sideeffect "brk #2307", "{x0}"(i64 %[[A]])
|
||||
; ABORT: unreachable
|
||||
; RECOVER: call void asm sideeffect "brk #2339", "{x0}"(i64 %[[A]])
|
||||
; RECOVER: br label
|
||||
|
||||
; ABORT: %[[A:[^ ]*]] = bitcast i64* %a to i8*
|
||||
; ABORT-DYNAMIC-SHADOW: call void @llvm.hwasan.check.memaccess(i8* %.hwasan.shadow, i8* %[[A]], i32 3)
|
||||
; ABORT-ZERO-BASED-SHADOW: call void @llvm.hwasan.check.memaccess(i8* null, i8* %[[A]], i32 3)
|
||||
|
||||
; CHECK: %[[G:[^ ]*]] = load i64, i64* %a, align 8
|
||||
; CHECK: ret i64 %[[G]]
|
||||
|
||||
@ -121,23 +124,24 @@ entry:
|
||||
|
||||
define i128 @test_load128(i128* %a) sanitize_hwaddress {
|
||||
; CHECK-LABEL: @test_load128(
|
||||
; CHECK: %[[A:[^ ]*]] = ptrtoint i128* %a to i64
|
||||
; CHECK: %[[B:[^ ]*]] = lshr i64 %[[A]], 56
|
||||
; CHECK: %[[PTRTAG:[^ ]*]] = trunc i64 %[[B]] to i8
|
||||
; CHECK: %[[C:[^ ]*]] = and i64 %[[A]], 72057594037927935
|
||||
; CHECK: %[[D:[^ ]*]] = lshr i64 %[[C]], 4
|
||||
; DYNAMIC-SHADOW: %[[D_DYN:[^ ]*]] = add i64 %[[D]], %.hwasan.shadow
|
||||
; DYNAMIC-SHADOW: %[[E:[^ ]*]] = inttoptr i64 %[[D_DYN]] to i8*
|
||||
; ZERO-BASED-SHADOW: %[[E:[^ ]*]] = inttoptr i64 %[[D]] to i8*
|
||||
; CHECK: %[[MEMTAG:[^ ]*]] = load i8, i8* %[[E]]
|
||||
; CHECK: %[[F:[^ ]*]] = icmp ne i8 %[[PTRTAG]], %[[MEMTAG]]
|
||||
; CHECK: br i1 %[[F]], label {{.*}}, label {{.*}}, !prof {{.*}}
|
||||
; RECOVER: %[[A:[^ ]*]] = ptrtoint i128* %a to i64
|
||||
; RECOVER: %[[B:[^ ]*]] = lshr i64 %[[A]], 56
|
||||
; RECOVER: %[[PTRTAG:[^ ]*]] = trunc i64 %[[B]] to i8
|
||||
; RECOVER: %[[C:[^ ]*]] = and i64 %[[A]], 72057594037927935
|
||||
; RECOVER: %[[D:[^ ]*]] = lshr i64 %[[C]], 4
|
||||
; RECOVER-DYNAMIC-SHADOW: %[[E:[^ ]*]] = getelementptr i8, i8* %.hwasan.shadow, i64 %4
|
||||
; RECOVER-ZERO-BASED-SHADOW: %[[E:[^ ]*]] = inttoptr i64 %[[D]] to i8*
|
||||
; RECOVER: %[[MEMTAG:[^ ]*]] = load i8, i8* %[[E]]
|
||||
; RECOVER: %[[F:[^ ]*]] = icmp ne i8 %[[PTRTAG]], %[[MEMTAG]]
|
||||
; RECOVER: br i1 %[[F]], label {{.*}}, label {{.*}}, !prof {{.*}}
|
||||
|
||||
; ABORT: call void asm sideeffect "brk #2308", "{x0}"(i64 %[[A]])
|
||||
; ABORT: unreachable
|
||||
; RECOVER: call void asm sideeffect "brk #2340", "{x0}"(i64 %[[A]])
|
||||
; RECOVER: br label
|
||||
|
||||
; ABORT: %[[A:[^ ]*]] = bitcast i128* %a to i8*
|
||||
; ABORT-DYNAMIC-SHADOW: call void @llvm.hwasan.check.memaccess(i8* %.hwasan.shadow, i8* %[[A]], i32 4)
|
||||
; ABORT-ZERO-BASED-SHADOW: call void @llvm.hwasan.check.memaccess(i8* null, i8* %[[A]], i32 4)
|
||||
|
||||
; CHECK: %[[G:[^ ]*]] = load i128, i128* %a, align 16
|
||||
; CHECK: ret i128 %[[G]]
|
||||
|
||||
@ -161,23 +165,23 @@ entry:
|
||||
|
||||
define void @test_store8(i8* %a, i8 %b) sanitize_hwaddress {
|
||||
; CHECK-LABEL: @test_store8(
|
||||
; CHECK: %[[A:[^ ]*]] = ptrtoint i8* %a to i64
|
||||
; CHECK: %[[B:[^ ]*]] = lshr i64 %[[A]], 56
|
||||
; CHECK: %[[PTRTAG:[^ ]*]] = trunc i64 %[[B]] to i8
|
||||
; CHECK: %[[C:[^ ]*]] = and i64 %[[A]], 72057594037927935
|
||||
; CHECK: %[[D:[^ ]*]] = lshr i64 %[[C]], 4
|
||||
; DYNAMIC-SHADOW: %[[D_DYN:[^ ]*]] = add i64 %[[D]], %.hwasan.shadow
|
||||
; DYNAMIC-SHADOW: %[[E:[^ ]*]] = inttoptr i64 %[[D_DYN]] to i8*
|
||||
; ZERO-BASED-SHADOW: %[[E:[^ ]*]] = inttoptr i64 %[[D]] to i8*
|
||||
; CHECK: %[[MEMTAG:[^ ]*]] = load i8, i8* %[[E]]
|
||||
; CHECK: %[[F:[^ ]*]] = icmp ne i8 %[[PTRTAG]], %[[MEMTAG]]
|
||||
; CHECK: br i1 %[[F]], label {{.*}}, label {{.*}}, !prof {{.*}}
|
||||
; RECOVER: %[[A:[^ ]*]] = ptrtoint i8* %a to i64
|
||||
; RECOVER: %[[B:[^ ]*]] = lshr i64 %[[A]], 56
|
||||
; RECOVER: %[[PTRTAG:[^ ]*]] = trunc i64 %[[B]] to i8
|
||||
; RECOVER: %[[C:[^ ]*]] = and i64 %[[A]], 72057594037927935
|
||||
; RECOVER: %[[D:[^ ]*]] = lshr i64 %[[C]], 4
|
||||
; RECOVER-DYNAMIC-SHADOW: %[[E:[^ ]*]] = getelementptr i8, i8* %.hwasan.shadow, i64 %4
|
||||
; RECOVER-ZERO-BASED-SHADOW: %[[E:[^ ]*]] = inttoptr i64 %[[D]] to i8*
|
||||
; RECOVER: %[[MEMTAG:[^ ]*]] = load i8, i8* %[[E]]
|
||||
; RECOVER: %[[F:[^ ]*]] = icmp ne i8 %[[PTRTAG]], %[[MEMTAG]]
|
||||
; RECOVER: br i1 %[[F]], label {{.*}}, label {{.*}}, !prof {{.*}}
|
||||
|
||||
; ABORT: call void asm sideeffect "brk #2320", "{x0}"(i64 %[[A]])
|
||||
; ABORT: unreachable
|
||||
; RECOVER: call void asm sideeffect "brk #2352", "{x0}"(i64 %[[A]])
|
||||
; RECOVER: br label
|
||||
|
||||
; ABORT-DYNAMIC-SHADOW: call void @llvm.hwasan.check.memaccess(i8* %.hwasan.shadow, i8* %a, i32 16)
|
||||
; ABORT-ZERO-BASED-SHADOW: call void @llvm.hwasan.check.memaccess(i8* null, i8* %a, i32 16)
|
||||
|
||||
; CHECK: store i8 %b, i8* %a, align 4
|
||||
; CHECK: ret void
|
||||
|
||||
@ -188,23 +192,24 @@ entry:
|
||||
|
||||
define void @test_store16(i16* %a, i16 %b) sanitize_hwaddress {
|
||||
; CHECK-LABEL: @test_store16(
|
||||
; CHECK: %[[A:[^ ]*]] = ptrtoint i16* %a to i64
|
||||
; CHECK: %[[B:[^ ]*]] = lshr i64 %[[A]], 56
|
||||
; CHECK: %[[PTRTAG:[^ ]*]] = trunc i64 %[[B]] to i8
|
||||
; CHECK: %[[C:[^ ]*]] = and i64 %[[A]], 72057594037927935
|
||||
; CHECK: %[[D:[^ ]*]] = lshr i64 %[[C]], 4
|
||||
; DYNAMIC-SHADOW: %[[D_DYN:[^ ]*]] = add i64 %[[D]], %.hwasan.shadow
|
||||
; DYNAMIC-SHADOW: %[[E:[^ ]*]] = inttoptr i64 %[[D_DYN]] to i8*
|
||||
; ZERO-BASED-SHADOW: %[[E:[^ ]*]] = inttoptr i64 %[[D]] to i8*
|
||||
; CHECK: %[[MEMTAG:[^ ]*]] = load i8, i8* %[[E]]
|
||||
; CHECK: %[[F:[^ ]*]] = icmp ne i8 %[[PTRTAG]], %[[MEMTAG]]
|
||||
; CHECK: br i1 %[[F]], label {{.*}}, label {{.*}}, !prof {{.*}}
|
||||
; RECOVER: %[[A:[^ ]*]] = ptrtoint i16* %a to i64
|
||||
; RECOVER: %[[B:[^ ]*]] = lshr i64 %[[A]], 56
|
||||
; RECOVER: %[[PTRTAG:[^ ]*]] = trunc i64 %[[B]] to i8
|
||||
; RECOVER: %[[C:[^ ]*]] = and i64 %[[A]], 72057594037927935
|
||||
; RECOVER: %[[D:[^ ]*]] = lshr i64 %[[C]], 4
|
||||
; RECOVER-DYNAMIC-SHADOW: %[[E:[^ ]*]] = getelementptr i8, i8* %.hwasan.shadow, i64 %4
|
||||
; RECOVER-ZERO-BASED-SHADOW: %[[E:[^ ]*]] = inttoptr i64 %[[D]] to i8*
|
||||
; RECOVER: %[[MEMTAG:[^ ]*]] = load i8, i8* %[[E]]
|
||||
; RECOVER: %[[F:[^ ]*]] = icmp ne i8 %[[PTRTAG]], %[[MEMTAG]]
|
||||
; RECOVER: br i1 %[[F]], label {{.*}}, label {{.*}}, !prof {{.*}}
|
||||
|
||||
; ABORT: call void asm sideeffect "brk #2321", "{x0}"(i64 %[[A]])
|
||||
; ABORT: unreachable
|
||||
; RECOVER: call void asm sideeffect "brk #2353", "{x0}"(i64 %[[A]])
|
||||
; RECOVER: br label
|
||||
|
||||
; ABORT: %[[A:[^ ]*]] = bitcast i16* %a to i8*
|
||||
; ABORT-DYNAMIC-SHADOW: call void @llvm.hwasan.check.memaccess(i8* %.hwasan.shadow, i8* %[[A]], i32 17)
|
||||
; ABORT-ZERO-BASED-SHADOW: call void @llvm.hwasan.check.memaccess(i8* null, i8* %[[A]], i32 17)
|
||||
|
||||
; CHECK: store i16 %b, i16* %a, align 4
|
||||
; CHECK: ret void
|
||||
|
||||
@ -215,23 +220,24 @@ entry:
|
||||
|
||||
define void @test_store32(i32* %a, i32 %b) sanitize_hwaddress {
|
||||
; CHECK-LABEL: @test_store32(
|
||||
; CHECK: %[[A:[^ ]*]] = ptrtoint i32* %a to i64
|
||||
; CHECK: %[[B:[^ ]*]] = lshr i64 %[[A]], 56
|
||||
; CHECK: %[[PTRTAG:[^ ]*]] = trunc i64 %[[B]] to i8
|
||||
; CHECK: %[[C:[^ ]*]] = and i64 %[[A]], 72057594037927935
|
||||
; CHECK: %[[D:[^ ]*]] = lshr i64 %[[C]], 4
|
||||
; DYNAMIC-SHADOW: %[[D_DYN:[^ ]*]] = add i64 %[[D]], %.hwasan.shadow
|
||||
; DYNAMIC-SHADOW: %[[E:[^ ]*]] = inttoptr i64 %[[D_DYN]] to i8*
|
||||
; ZERO-BASED-SHADOW: %[[E:[^ ]*]] = inttoptr i64 %[[D]] to i8*
|
||||
; CHECK: %[[MEMTAG:[^ ]*]] = load i8, i8* %[[E]]
|
||||
; CHECK: %[[F:[^ ]*]] = icmp ne i8 %[[PTRTAG]], %[[MEMTAG]]
|
||||
; CHECK: br i1 %[[F]], label {{.*}}, label {{.*}}, !prof {{.*}}
|
||||
; RECOVER: %[[A:[^ ]*]] = ptrtoint i32* %a to i64
|
||||
; RECOVER: %[[B:[^ ]*]] = lshr i64 %[[A]], 56
|
||||
; RECOVER: %[[PTRTAG:[^ ]*]] = trunc i64 %[[B]] to i8
|
||||
; RECOVER: %[[C:[^ ]*]] = and i64 %[[A]], 72057594037927935
|
||||
; RECOVER: %[[D:[^ ]*]] = lshr i64 %[[C]], 4
|
||||
; RECOVER-DYNAMIC-SHADOW: %[[E:[^ ]*]] = getelementptr i8, i8* %.hwasan.shadow, i64 %4
|
||||
; RECOVER-ZERO-BASED-SHADOW: %[[E:[^ ]*]] = inttoptr i64 %[[D]] to i8*
|
||||
; RECOVER: %[[MEMTAG:[^ ]*]] = load i8, i8* %[[E]]
|
||||
; RECOVER: %[[F:[^ ]*]] = icmp ne i8 %[[PTRTAG]], %[[MEMTAG]]
|
||||
; RECOVER: br i1 %[[F]], label {{.*}}, label {{.*}}, !prof {{.*}}
|
||||
|
||||
; ABORT: call void asm sideeffect "brk #2322", "{x0}"(i64 %[[A]])
|
||||
; ABORT: unreachable
|
||||
; RECOVER: call void asm sideeffect "brk #2354", "{x0}"(i64 %[[A]])
|
||||
; RECOVER: br label
|
||||
|
||||
; ABORT: %[[A:[^ ]*]] = bitcast i32* %a to i8*
|
||||
; ABORT-DYNAMIC-SHADOW: call void @llvm.hwasan.check.memaccess(i8* %.hwasan.shadow, i8* %[[A]], i32 18)
|
||||
; ABORT-ZERO-BASED-SHADOW: call void @llvm.hwasan.check.memaccess(i8* null, i8* %[[A]], i32 18)
|
||||
|
||||
; CHECK: store i32 %b, i32* %a, align 4
|
||||
; CHECK: ret void
|
||||
|
||||
@ -242,23 +248,24 @@ entry:
|
||||
|
||||
define void @test_store64(i64* %a, i64 %b) sanitize_hwaddress {
|
||||
; CHECK-LABEL: @test_store64(
|
||||
; CHECK: %[[A:[^ ]*]] = ptrtoint i64* %a to i64
|
||||
; CHECK: %[[B:[^ ]*]] = lshr i64 %[[A]], 56
|
||||
; CHECK: %[[PTRTAG:[^ ]*]] = trunc i64 %[[B]] to i8
|
||||
; CHECK: %[[C:[^ ]*]] = and i64 %[[A]], 72057594037927935
|
||||
; CHECK: %[[D:[^ ]*]] = lshr i64 %[[C]], 4
|
||||
; DYNAMIC-SHADOW: %[[D_DYN:[^ ]*]] = add i64 %[[D]], %.hwasan.shadow
|
||||
; DYNAMIC-SHADOW: %[[E:[^ ]*]] = inttoptr i64 %[[D_DYN]] to i8*
|
||||
; ZERO-BASED-SHADOW: %[[E:[^ ]*]] = inttoptr i64 %[[D]] to i8*
|
||||
; CHECK: %[[MEMTAG:[^ ]*]] = load i8, i8* %[[E]]
|
||||
; CHECK: %[[F:[^ ]*]] = icmp ne i8 %[[PTRTAG]], %[[MEMTAG]]
|
||||
; CHECK: br i1 %[[F]], label {{.*}}, label {{.*}}, !prof {{.*}}
|
||||
; RECOVER: %[[A:[^ ]*]] = ptrtoint i64* %a to i64
|
||||
; RECOVER: %[[B:[^ ]*]] = lshr i64 %[[A]], 56
|
||||
; RECOVER: %[[PTRTAG:[^ ]*]] = trunc i64 %[[B]] to i8
|
||||
; RECOVER: %[[C:[^ ]*]] = and i64 %[[A]], 72057594037927935
|
||||
; RECOVER: %[[D:[^ ]*]] = lshr i64 %[[C]], 4
|
||||
; RECOVER-DYNAMIC-SHADOW: %[[E:[^ ]*]] = getelementptr i8, i8* %.hwasan.shadow, i64 %4
|
||||
; RECOVER-ZERO-BASED-SHADOW: %[[E:[^ ]*]] = inttoptr i64 %[[D]] to i8*
|
||||
; RECOVER: %[[MEMTAG:[^ ]*]] = load i8, i8* %[[E]]
|
||||
; RECOVER: %[[F:[^ ]*]] = icmp ne i8 %[[PTRTAG]], %[[MEMTAG]]
|
||||
; RECOVER: br i1 %[[F]], label {{.*}}, label {{.*}}, !prof {{.*}}
|
||||
|
||||
; ABORT: call void asm sideeffect "brk #2323", "{x0}"(i64 %[[A]])
|
||||
; ABORT: unreachable
|
||||
; RECOVER: call void asm sideeffect "brk #2355", "{x0}"(i64 %[[A]])
|
||||
; RECOVER: br label
|
||||
|
||||
; ABORT: %[[A:[^ ]*]] = bitcast i64* %a to i8*
|
||||
; ABORT-DYNAMIC-SHADOW: call void @llvm.hwasan.check.memaccess(i8* %.hwasan.shadow, i8* %[[A]], i32 19)
|
||||
; ABORT-ZERO-BASED-SHADOW: call void @llvm.hwasan.check.memaccess(i8* null, i8* %[[A]], i32 19)
|
||||
|
||||
; CHECK: store i64 %b, i64* %a, align 8
|
||||
; CHECK: ret void
|
||||
|
||||
@ -269,23 +276,24 @@ entry:
|
||||
|
||||
define void @test_store128(i128* %a, i128 %b) sanitize_hwaddress {
|
||||
; CHECK-LABEL: @test_store128(
|
||||
; CHECK: %[[A:[^ ]*]] = ptrtoint i128* %a to i64
|
||||
; CHECK: %[[B:[^ ]*]] = lshr i64 %[[A]], 56
|
||||
; CHECK: %[[PTRTAG:[^ ]*]] = trunc i64 %[[B]] to i8
|
||||
; CHECK: %[[C:[^ ]*]] = and i64 %[[A]], 72057594037927935
|
||||
; CHECK: %[[D:[^ ]*]] = lshr i64 %[[C]], 4
|
||||
; DYNAMIC-SHADOW: %[[D_DYN:[^ ]*]] = add i64 %[[D]], %.hwasan.shadow
|
||||
; DYNAMIC-SHADOW: %[[E:[^ ]*]] = inttoptr i64 %[[D_DYN]] to i8*
|
||||
; ZERO-BASED-SHADOW: %[[E:[^ ]*]] = inttoptr i64 %[[D]] to i8*
|
||||
; CHECK: %[[MEMTAG:[^ ]*]] = load i8, i8* %[[E]]
|
||||
; CHECK: %[[F:[^ ]*]] = icmp ne i8 %[[PTRTAG]], %[[MEMTAG]]
|
||||
; CHECK: br i1 %[[F]], label {{.*}}, label {{.*}}, !prof {{.*}}
|
||||
; RECOVER: %[[A:[^ ]*]] = ptrtoint i128* %a to i64
|
||||
; RECOVER: %[[B:[^ ]*]] = lshr i64 %[[A]], 56
|
||||
; RECOVER: %[[PTRTAG:[^ ]*]] = trunc i64 %[[B]] to i8
|
||||
; RECOVER: %[[C:[^ ]*]] = and i64 %[[A]], 72057594037927935
|
||||
; RECOVER: %[[D:[^ ]*]] = lshr i64 %[[C]], 4
|
||||
; RECOVER-DYNAMIC-SHADOW: %[[E:[^ ]*]] = getelementptr i8, i8* %.hwasan.shadow, i64 %4
|
||||
; RECOVER-ZERO-BASED-SHADOW: %[[E:[^ ]*]] = inttoptr i64 %[[D]] to i8*
|
||||
; RECOVER: %[[MEMTAG:[^ ]*]] = load i8, i8* %[[E]]
|
||||
; RECOVER: %[[F:[^ ]*]] = icmp ne i8 %[[PTRTAG]], %[[MEMTAG]]
|
||||
; RECOVER: br i1 %[[F]], label {{.*}}, label {{.*}}, !prof {{.*}}
|
||||
|
||||
; ABORT: call void asm sideeffect "brk #2324", "{x0}"(i64 %[[A]])
|
||||
; ABORT: unreachable
|
||||
; RECOVER: call void asm sideeffect "brk #2356", "{x0}"(i64 %[[A]])
|
||||
; RECOVER: br label
|
||||
|
||||
; ABORT: %[[A:[^ ]*]] = bitcast i128* %a to i8*
|
||||
; ABORT-DYNAMIC-SHADOW: call void @llvm.hwasan.check.memaccess(i8* %.hwasan.shadow, i8* %[[A]], i32 20)
|
||||
; ABORT-ZERO-BASED-SHADOW: call void @llvm.hwasan.check.memaccess(i8* null, i8* %[[A]], i32 20)
|
||||
|
||||
; CHECK: store i128 %b, i128* %a, align 16
|
||||
; CHECK: ret void
|
||||
|
||||
|
@ -1,11 +1,9 @@
|
||||
; Test KHWASan instrumentation.
|
||||
;
|
||||
; RUN: opt < %s -hwasan -hwasan-kernel=1 -S | FileCheck %s --allow-empty --check-prefixes=INIT
|
||||
; RUN: opt < %s -hwasan -hwasan-kernel=1 -S | FileCheck %s --check-prefixes=CHECK,NOOFFSET,MATCH-ALL
|
||||
; RUN: opt < %s -hwasan -hwasan-kernel=1 -hwasan-mapping-offset=12345678 -S | FileCheck %s --check-prefixes=CHECK,OFFSET,MATCH-ALL
|
||||
; RUN: opt < %s -hwasan -hwasan-kernel=1 -hwasan-recover=0 -S | FileCheck %s --check-prefixes=CHECK,NOOFFSET,ABORT,MATCH-ALL
|
||||
; RUN: opt < %s -hwasan -hwasan-kernel=1 -hwasan-recover=1 -S | FileCheck %s --check-prefixes=CHECK,NOOFFSET,RECOVER,MATCH-ALL
|
||||
; RUN: opt < %s -hwasan -hwasan-kernel=1 -hwasan-recover=1 -hwasan-match-all-tag=-1 -S | FileCheck %s --check-prefixes=CHECK,NOOFFSET,RECOVER,NO-MATCH-ALL
|
||||
; RUN: opt < %s -hwasan -hwasan-kernel=1 -hwasan-recover=1 -S | FileCheck %s --allow-empty --check-prefixes=INIT
|
||||
; RUN: opt < %s -hwasan -hwasan-kernel=1 -hwasan-recover=1 -S | FileCheck %s --check-prefixes=CHECK,NOOFFSET,MATCH-ALL
|
||||
; RUN: opt < %s -hwasan -hwasan-kernel=1 -hwasan-recover=1 -hwasan-mapping-offset=12345678 -S | FileCheck %s --check-prefixes=CHECK,OFFSET,MATCH-ALL
|
||||
; RUN: opt < %s -hwasan -hwasan-kernel=1 -hwasan-recover=1 -hwasan-match-all-tag=-1 -S | FileCheck %s --check-prefixes=CHECK,NOOFFSET,NO-MATCH-ALL
|
||||
|
||||
target datalayout = "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128"
|
||||
target triple = "aarch64--linux-android"
|
||||
@ -20,8 +18,7 @@ define i8 @test_load(i8* %a) sanitize_hwaddress {
|
||||
|
||||
; NOOFFSET: %[[E:[^ ]*]] = inttoptr i64 %[[D]] to i8*
|
||||
|
||||
; OFFSET: %[[D1:[^ ]*]] = add i64 %[[D]], 12345678
|
||||
; OFFSET: %[[E:[^ ]*]] = inttoptr i64 %[[D1]] to i8*
|
||||
; OFFSET: %[[E:[^ ]*]] = getelementptr i8, i8* inttoptr (i64 12345678 to i8*), i64 %[[D]]
|
||||
|
||||
; CHECK: %[[MEMTAG:[^ ]*]] = load i8, i8* %[[E]]
|
||||
; CHECK: %[[F:[^ ]*]] = icmp ne i8 %[[PTRTAG]], %[[MEMTAG]]
|
||||
@ -32,10 +29,8 @@ define i8 @test_load(i8* %a) sanitize_hwaddress {
|
||||
|
||||
; NO-MATCH-ALL: br i1 %[[F]], label {{.*}}, label {{.*}}, !prof {{.*}}
|
||||
|
||||
; ABORT: call void asm sideeffect "brk #2304", "{x0}"(i64 %[[A]])
|
||||
; ABORT: unreachable
|
||||
; RECOVER: call void asm sideeffect "brk #2336", "{x0}"(i64 %[[A]])
|
||||
; RECOVER: br label
|
||||
; CHECK: call void asm sideeffect "brk #2336", "{x0}"(i64 %[[A]])
|
||||
; CHECK: br label
|
||||
|
||||
; CHECK: %[[G:[^ ]*]] = load i8, i8* %a, align 4
|
||||
; CHECK: ret i8 %[[G]]
|
||||
|
@ -23,10 +23,10 @@ define i32 @test_load(i32* %a) sanitize_hwaddress {
|
||||
; CHECK-LABEL: @test_load
|
||||
; CHECK: entry:
|
||||
|
||||
; CHECK-IFUNC: %[[A:[^ ]*]] = call i64 asm "", "=r,0"([0 x i8]* @__hwasan_shadow)
|
||||
; CHECK-IFUNC: add i64 %{{.*}}, %[[A]]
|
||||
; CHECK-IFUNC: %[[A:[^ ]*]] = call i8* asm "", "=r,0"([0 x i8]* @__hwasan_shadow)
|
||||
; CHECK-IFUNC: @llvm.hwasan.check.memaccess(i8* %[[A]]
|
||||
|
||||
; CHECK-GLOBAL: load i64, i64* @__hwasan_shadow_memory_dynamic_address
|
||||
; CHECK-GLOBAL: load i8*, i8** @__hwasan_shadow_memory_dynamic_address
|
||||
|
||||
; CHECK-TLS: %[[A:[^ ]*]] = call i8* @llvm.thread.pointer()
|
||||
; CHECK-TLS: %[[B:[^ ]*]] = getelementptr i8, i8* %[[A]], i32 48
|
||||
@ -54,10 +54,10 @@ define void @test_alloca() sanitize_hwaddress {
|
||||
; CHECK-LABEL: @test_alloca
|
||||
; CHECK: entry:
|
||||
|
||||
; CHECK-IFUNC: %[[A:[^ ]*]] = call i64 asm "", "=r,0"([0 x i8]* @__hwasan_shadow)
|
||||
; CHECK-IFUNC: add i64 %{{.*}}, %[[A]]
|
||||
; CHECK-IFUNC: %[[A:[^ ]*]] = call i8* asm "", "=r,0"([0 x i8]* @__hwasan_shadow)
|
||||
; CHECK-IFUNC: getelementptr i8, i8* %[[A]]
|
||||
|
||||
; CHECK-GLOBAL: load i64, i64* @__hwasan_shadow_memory_dynamic_address
|
||||
; CHECK-GLOBAL: load i8*, i8** @__hwasan_shadow_memory_dynamic_address
|
||||
|
||||
; CHECK-TLS: %[[A:[^ ]*]] = call i8* @llvm.thread.pointer()
|
||||
; CHECK-TLS: %[[B:[^ ]*]] = getelementptr i8, i8* %[[A]], i32 48
|
||||
|