Mirror of https://github.com/capstone-engine/llvm-capstone.git (synced 2024-11-25 23:00:15 +00:00)
Delete x86_64 ShadowCallStack support
Summary:
ShadowCallStack on x86_64 suffered from the same racy security issues as
Return Flow Guard and had performance overhead as high as 13% depending on
the benchmark. x86_64 ShadowCallStack was always an experimental feature and
never shipped a runtime required to support it; as such, there are no
expected downstream users.

Reviewers: pcc

Reviewed By: pcc

Subscribers: mgorny, javed.absar, hiraditya, jdoerfert, cfe-commits, #sanitizers, llvm-commits

Tags: #clang, #sanitizers, #llvm

Differential Revision: https://reviews.llvm.org/D59034

llvm-svn: 355624
commit 2e1479e2f2
parent de3348ae3f
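ShadowCallStack itself remains available on aarch64 after this change. A minimal sketch of how it would still be enabled there, using the -fsanitize=shadow-call-stack and -ffixed-x18 flags that appear in the test configuration below; the target triple, file name and program contents are illustrative assumptions:

    /* demo.c -- hypothetical example, not part of this commit.
       Assumed build invocation:
         clang --target=aarch64-linux-gnu -fsanitize=shadow-call-stack \
               -ffixed-x18 -O2 demo.c -o demo */
    #include <stdio.h>

    void greet(void) { puts("hello"); }

    int main(void) {
      greet();   /* main calls another function, so as a non-leaf function its
                    prolog/epilog get the shadow-call-stack instrumentation */
      return 0;
    }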
@@ -9,7 +9,7 @@ Introduction
 ============
 
 ShadowCallStack is an instrumentation pass, currently only implemented for
-aarch64 and x86_64, that protects programs against return address overwrites
+aarch64, that protects programs against return address overwrites
 (e.g. stack buffer overflows.) It works by saving a function's return address
 to a separately allocated 'shadow call stack' in the function prolog in
 non-leaf functions and loading the return address from the shadow call stack
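As a rough illustration of the scheme this hunk describes (purely a conceptual C model, not the real instrumentation, which the compiler emits directly in function prologs and epilogs and which keeps the shadow stack pointer in a reserved register such as x18 on aarch64):

    #include <stdint.h>

    /* A separately allocated stack that holds only return addresses. */
    static uintptr_t shadow_stack[1024];
    static uintptr_t *scs_top = shadow_stack;

    /* What an instrumented non-leaf prolog conceptually does. */
    static inline void scs_push(uintptr_t retaddr) { *scs_top++ = retaddr; }

    /* What the matching epilog conceptually does: the return address is taken
       from the shadow call stack, so a value overwritten on the ordinary stack
       (e.g. by a stack buffer overflow) is never returned through. */
    static inline uintptr_t scs_pop(void) { return *--scs_top; }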
@@ -18,11 +18,10 @@ for compatibility with unwinders, but is otherwise unused.
 
 The aarch64 implementation is considered production ready, and
 an `implementation of the runtime`_ has been added to Android's libc
-(bionic). The x86_64 implementation was evaluated using Chromium and was
-found to have critical performance and security deficiencies, and may be
-removed in a future release of the compiler. This document only describes
-the aarch64 implementation; details on the x86_64 implementation are found
-in the `Clang 7.0.1 documentation`_.
+(bionic). An x86_64 implementation was evaluated using Chromium and was found
+to have critical performance and security deficiencies--it was removed in
+LLVM 9.0. Details on the x86_64 implementation can be found in the
+`Clang 7.0.1 documentation`_.
 
 .. _`implementation of the runtime`: https://android.googlesource.com/platform/bionic/+/808d176e7e0dd727c7f929622ec017f6e065c582/libc/bionic/pthread_create.cpp#128
 .. _`Clang 7.0.1 documentation`: https://releases.llvm.org/7.0.1/tools/clang/docs/ShadowCallStack.html
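The runtime mentioned here is what allocates the shadow call stack and installs it in x18 before any instrumented code runs. A minimal sketch of that job, modeled on the test-only runtime further down in this commit (the mapping size and absence of guard pages are illustrative simplifications, not how a production runtime like bionic's does it):

    #include <stdlib.h>
    #include <sys/mman.h>

    static void scs_init(void) {
      /* Allocate storage for the shadow call stack. */
      void *stack = mmap(NULL, 8192, PROT_READ | PROT_WRITE,
                         MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
      if (stack == MAP_FAILED)
        abort();
    #if defined(__aarch64__)
      /* The aarch64 instrumentation addresses the shadow call stack via x18. */
      __asm__ __volatile__("mov x18, %0" ::"r"(stack));
    #else
    #error Unsupported platform
    #endif
    }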
@@ -37,10 +36,9 @@ consuming more memory for shorter function prologs and epilogs with fewer
 memory accesses.
 
 `Return Flow Guard`_ is a pure software implementation of shadow call stacks
-on x86_64. It is similar to the ShadowCallStack x86_64 implementation but
-trades off higher memory usage for a shorter prologue and epilogue. Like
-x86_64 ShadowCallStack, it is inherently racy due to the architecture's use
-of the stack for calls and returns.
+on x86_64. Like the previous implementation of ShadowCallStack on x86_64, it is
+inherently racy due to the architecture's use of the stack for calls and
+returns.
 
 Intel `Control-flow Enforcement Technology`_ (CET) is a proposed hardware
 extension that would add native support to use a shadow stack to store/check
@@ -254,7 +254,7 @@ set(ALL_XRAY_SUPPORTED_ARCH ${X86_64})
 else()
   set(ALL_XRAY_SUPPORTED_ARCH ${X86_64} ${ARM32} ${ARM64} ${MIPS32} ${MIPS64} powerpc64le)
 endif()
-set(ALL_SHADOWCALLSTACK_SUPPORTED_ARCH ${X86_64} ${ARM64})
+set(ALL_SHADOWCALLSTACK_SUPPORTED_ARCH ${ARM64})
 
 if(APPLE)
   include(CompilerRTDarwinUtils)
@@ -33,9 +33,5 @@ __attribute__((noinline)) void scs_fputs_stdout(const char *p) {
 }
 
 #else
-
-__attribute__((noinline)) void scs_fputs_stdout(const char *p) {
-  fputs(p, stdout);
-}
-
+#error Unsupported platform
 #endif
@@ -19,5 +19,5 @@ if config.target_arch == 'aarch64':
   scs_arch_cflags += ' -ffixed-x18 '
 config.substitutions.append( ("%clang_scs ", config.clang + ' -O0 -fsanitize=shadow-call-stack ' + scs_arch_cflags + ' ') )
 
-if config.host_os not in ['Linux'] or config.target_arch not in ['x86_64', 'aarch64']:
+if config.host_os not in ['Linux'] or config.target_arch not in ['aarch64']:
   config.unsupported = True
@@ -4,10 +4,6 @@
 
 #pragma once
 
-#ifdef __x86_64__
-#include <asm/prctl.h>
-int arch_prctl(int code, void *addr);
-#endif
 #include <stdlib.h>
 #include <sys/mman.h>
 #include <sys/prctl.h>
@@ -21,10 +17,7 @@ static void __shadowcallstack_init() {
   if (stack == MAP_FAILED)
     abort();
 
-#if defined(__x86_64__)
-  if (arch_prctl(ARCH_SET_GS, stack))
-    abort();
-#elif defined(__aarch64__)
+#if defined(__aarch64__)
   __asm__ __volatile__("mov x18, %0" ::"r"(stack));
 #else
 #error Unsupported platform
@@ -1,5 +0,0 @@
-// See overflow.c for a description.
-
-// REQUIRES: aarch64-target-arch
-// RUN: %clang_scs %S/overflow.c -o %t -DITERATIONS=12
-// RUN: %run %t | FileCheck %S/overflow.c
@@ -1,5 +0,0 @@
-// See overflow.c for a description.
-
-// REQUIRES: x86_64-target-arch
-// RUN: %clang_scs %S/overflow.c -o %t -DITERATIONS=12
-// RUN: not --crash %run %t
@@ -8,12 +8,10 @@
 // RUN: %clang_scs %s -o %t -DITERATIONS=3
 // RUN: %run %t | FileCheck %s
 
-// The behavioral check for SCS + overflow lives in the tests overflow-x86_64.c
-// and overflow-aarch64.c. This is because the expected behavior is different
-// between the two platforms. On x86_64 we crash because the comparison between
-// the shadow call stack and the regular stack fails. On aarch64 there is no
-// comparison, we just load the return address from the shadow call stack. So we
-// just expect not to see the output from print_and_exit.
+// On aarch64 we just load the return address from the shadow call stack so we
+// do not expect to see the output from print_and_exit.
+// RUN: %clang_scs %s -o %t -DITERATIONS=12
+// RUN: %run %t | FileCheck %S/overflow.c
 
 #include <stdio.h>
 #include <stdlib.h>
@@ -22,7 +22,6 @@ endif()
 add_public_tablegen_target(X86CommonTableGen)
 
 set(sources
-  ShadowCallStack.cpp
   X86AsmPrinter.cpp
   X86CallFrameOptimization.cpp
   X86CallingConv.cpp
@@ -1,321 +0,0 @@
-//===------- ShadowCallStack.cpp - Shadow Call Stack pass -----------------===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-//
-// The ShadowCallStack pass instruments function prologs/epilogs to check that
-// the return address has not been corrupted during the execution of the
-// function. The return address is stored in a 'shadow call stack' addressed
-// using the %gs segment register.
-//
-//===----------------------------------------------------------------------===//
-
-#include "X86.h"
-#include "X86InstrBuilder.h"
-#include "X86InstrInfo.h"
-#include "X86Subtarget.h"
-
-#include "llvm/CodeGen/MachineFunction.h"
-#include "llvm/CodeGen/MachineFunctionPass.h"
-#include "llvm/CodeGen/MachineInstrBuilder.h"
-#include "llvm/CodeGen/MachineModuleInfo.h"
-#include "llvm/CodeGen/MachineRegisterInfo.h"
-#include "llvm/CodeGen/Passes.h"
-#include "llvm/CodeGen/TargetInstrInfo.h"
-#include "llvm/Pass.h"
-#include "llvm/Support/raw_ostream.h"
-
-using namespace llvm;
-
-namespace {
-
-class ShadowCallStack : public MachineFunctionPass {
-public:
-  static char ID;
-
-  ShadowCallStack() : MachineFunctionPass(ID) {
-    initializeShadowCallStackPass(*PassRegistry::getPassRegistry());
-  }
-
-  void getAnalysisUsage(AnalysisUsage &AU) const override {
-    MachineFunctionPass::getAnalysisUsage(AU);
-  }
-
-  bool runOnMachineFunction(MachineFunction &Fn) override;
-
-private:
-  // Do not instrument leaf functions with this many or fewer instructions. The
-  // shadow call stack instrumented prolog/epilog are slightly race-y reading
-  // and checking the saved return address, so it is better to not instrument
-  // functions that have fewer instructions than the instrumented prolog/epilog
-  // race.
-  static const size_t SkipLeafInstructions = 3;
-};
-
-char ShadowCallStack::ID = 0;
-} // end anonymous namespace.
-
-static void addProlog(MachineFunction &Fn, const TargetInstrInfo *TII,
-                      MachineBasicBlock &MBB, const DebugLoc &DL);
-static void addPrologLeaf(MachineFunction &Fn, const TargetInstrInfo *TII,
-                          MachineBasicBlock &MBB, const DebugLoc &DL,
-                          MCPhysReg FreeRegister);
-
-static void addEpilog(const TargetInstrInfo *TII, MachineBasicBlock &MBB,
-                      MachineInstr &MI, MachineBasicBlock &TrapBB);
-static void addEpilogLeaf(const TargetInstrInfo *TII, MachineBasicBlock &MBB,
-                          MachineInstr &MI, MachineBasicBlock &TrapBB,
-                          MCPhysReg FreeRegister);
-// Generate a longer epilog that only uses r10 when a tailcall branches to r11.
-static void addEpilogOnlyR10(const TargetInstrInfo *TII, MachineBasicBlock &MBB,
-                             MachineInstr &MI, MachineBasicBlock &TrapBB);
-
-// Helper function to add ModR/M references for [Seg: Reg + Offset] memory
-// accesses
-static inline const MachineInstrBuilder &
-addSegmentedMem(const MachineInstrBuilder &MIB, MCPhysReg Seg, MCPhysReg Reg,
-                int Offset = 0) {
-  return MIB.addReg(Reg).addImm(1).addReg(0).addImm(Offset).addReg(Seg);
-}
-
-static void addProlog(MachineFunction &Fn, const TargetInstrInfo *TII,
-                      MachineBasicBlock &MBB, const DebugLoc &DL) {
-  const MCPhysReg ReturnReg = X86::R10;
-  const MCPhysReg OffsetReg = X86::R11;
-
-  auto MBBI = MBB.begin();
-  // mov r10, [rsp]
-  addDirectMem(BuildMI(MBB, MBBI, DL, TII->get(X86::MOV64rm)).addDef(ReturnReg),
-               X86::RSP);
-  // xor r11, r11
-  BuildMI(MBB, MBBI, DL, TII->get(X86::XOR64rr))
-      .addDef(OffsetReg)
-      .addReg(OffsetReg, RegState::Undef)
-      .addReg(OffsetReg, RegState::Undef);
-  // add QWORD [gs:r11], 8
-  addSegmentedMem(BuildMI(MBB, MBBI, DL, TII->get(X86::ADD64mi8)), X86::GS,
-                  OffsetReg)
-      .addImm(8);
-  // mov r11, [gs:r11]
-  addSegmentedMem(
-      BuildMI(MBB, MBBI, DL, TII->get(X86::MOV64rm)).addDef(OffsetReg), X86::GS,
-      OffsetReg);
-  // mov [gs:r11], r10
-  addSegmentedMem(BuildMI(MBB, MBBI, DL, TII->get(X86::MOV64mr)), X86::GS,
-                  OffsetReg)
-      .addReg(ReturnReg);
-}
-
-static void addPrologLeaf(MachineFunction &Fn, const TargetInstrInfo *TII,
-                          MachineBasicBlock &MBB, const DebugLoc &DL,
-                          MCPhysReg FreeRegister) {
-  // mov REG, [rsp]
-  addDirectMem(BuildMI(MBB, MBB.begin(), DL, TII->get(X86::MOV64rm))
-                   .addDef(FreeRegister),
-               X86::RSP);
-}
-
-static void addEpilog(const TargetInstrInfo *TII, MachineBasicBlock &MBB,
-                      MachineInstr &MI, MachineBasicBlock &TrapBB) {
-  const DebugLoc &DL = MI.getDebugLoc();
-
-  // xor r11, r11
-  BuildMI(MBB, MI, DL, TII->get(X86::XOR64rr))
-      .addDef(X86::R11)
-      .addReg(X86::R11, RegState::Undef)
-      .addReg(X86::R11, RegState::Undef);
-  // mov r10, [gs:r11]
-  addSegmentedMem(BuildMI(MBB, MI, DL, TII->get(X86::MOV64rm)).addDef(X86::R10),
-                  X86::GS, X86::R11);
-  // mov r10, [gs:r10]
-  addSegmentedMem(BuildMI(MBB, MI, DL, TII->get(X86::MOV64rm)).addDef(X86::R10),
-                  X86::GS, X86::R10);
-  // sub QWORD [gs:r11], 8
-  // This instruction should not be moved up to avoid a signal race.
-  addSegmentedMem(BuildMI(MBB, MI, DL, TII->get(X86::SUB64mi8)),
-                  X86::GS, X86::R11)
-      .addImm(8);
-  // cmp [rsp], r10
-  addDirectMem(BuildMI(MBB, MI, DL, TII->get(X86::CMP64mr)), X86::RSP)
-      .addReg(X86::R10);
-  // jne trap
-  BuildMI(MBB, MI, DL, TII->get(X86::JNE_1)).addMBB(&TrapBB);
-  MBB.addSuccessor(&TrapBB);
-}
-
-static void addEpilogLeaf(const TargetInstrInfo *TII, MachineBasicBlock &MBB,
-                          MachineInstr &MI, MachineBasicBlock &TrapBB,
-                          MCPhysReg FreeRegister) {
-  const DebugLoc &DL = MI.getDebugLoc();
-
-  // cmp [rsp], REG
-  addDirectMem(BuildMI(MBB, MI, DL, TII->get(X86::CMP64mr)), X86::RSP)
-      .addReg(FreeRegister);
-  // jne trap
-  BuildMI(MBB, MI, DL, TII->get(X86::JNE_1)).addMBB(&TrapBB);
-  MBB.addSuccessor(&TrapBB);
-}
-
-static void addEpilogOnlyR10(const TargetInstrInfo *TII, MachineBasicBlock &MBB,
-                             MachineInstr &MI, MachineBasicBlock &TrapBB) {
-  const DebugLoc &DL = MI.getDebugLoc();
-
-  // xor r10, r10
-  BuildMI(MBB, MI, DL, TII->get(X86::XOR64rr))
-      .addDef(X86::R10)
-      .addReg(X86::R10, RegState::Undef)
-      .addReg(X86::R10, RegState::Undef);
-  // mov r10, [gs:r10]
-  addSegmentedMem(BuildMI(MBB, MI, DL, TII->get(X86::MOV64rm)).addDef(X86::R10),
-                  X86::GS, X86::R10);
-  // mov r10, [gs:r10]
-  addSegmentedMem(BuildMI(MBB, MI, DL, TII->get(X86::MOV64rm)).addDef(X86::R10),
-                  X86::GS, X86::R10);
-  // sub QWORD [gs:0], 8
-  // This instruction should not be moved up to avoid a signal race.
-  addSegmentedMem(BuildMI(MBB, MI, DL, TII->get(X86::SUB64mi8)), X86::GS, 0)
-      .addImm(8);
-  // cmp [rsp], r10
-  addDirectMem(BuildMI(MBB, MI, DL, TII->get(X86::CMP64mr)), X86::RSP)
-      .addReg(X86::R10);
-  // jne trap
-  BuildMI(MBB, MI, DL, TII->get(X86::JNE_1)).addMBB(&TrapBB);
-  MBB.addSuccessor(&TrapBB);
-}
-
-bool ShadowCallStack::runOnMachineFunction(MachineFunction &Fn) {
-  if (!Fn.getFunction().hasFnAttribute(Attribute::ShadowCallStack) ||
-      Fn.getFunction().hasFnAttribute(Attribute::Naked))
-    return false;
-
-  if (Fn.empty() || !Fn.getRegInfo().tracksLiveness())
-    return false;
-
-  // FIXME: Skip functions that have r10 or r11 live on entry (r10 can be live
-  // on entry for parameters with the nest attribute.)
-  if (Fn.front().isLiveIn(X86::R10) || Fn.front().isLiveIn(X86::R11))
-    return false;
-
-  // FIXME: Skip functions with conditional and r10 tail calls for now.
-  bool HasReturn = false;
-  for (auto &MBB : Fn) {
-    if (MBB.empty())
-      continue;
-
-    const MachineInstr &MI = MBB.instr_back();
-    if (MI.isReturn())
-      HasReturn = true;
-
-    if (MI.isReturn() && MI.isCall()) {
-      if (MI.findRegisterUseOperand(X86::EFLAGS))
-        return false;
-      // This should only be possible on Windows 64 (see GR64_TC versus
-      // GR64_TCW64.)
-      if (MI.findRegisterUseOperand(X86::R10) ||
-          MI.hasRegisterImplicitUseOperand(X86::R10))
-        return false;
-    }
-  }
-
-  if (!HasReturn)
-    return false;
-
-  // For leaf functions:
-  // 1. Do not instrument very short functions where it would not improve that
-  //    function's security.
-  // 2. Detect if there is an unused caller-saved register we can reserve to
-  //    hold the return address instead of writing/reading it from the shadow
-  //    call stack.
-  MCPhysReg LeafFuncRegister = X86::NoRegister;
-  if (!Fn.getFrameInfo().adjustsStack()) {
-    size_t InstructionCount = 0;
-    std::bitset<X86::NUM_TARGET_REGS> UsedRegs;
-    for (auto &MBB : Fn) {
-      for (auto &LiveIn : MBB.liveins())
-        UsedRegs.set(LiveIn.PhysReg);
-      for (auto &MI : MBB) {
-        if (!MI.isDebugValue() && !MI.isCFIInstruction() && !MI.isLabel())
-          InstructionCount++;
-        for (auto &Op : MI.operands())
-          if (Op.isReg() && Op.isDef())
-            UsedRegs.set(Op.getReg());
-      }
-    }
-
-    if (InstructionCount <= SkipLeafInstructions)
-      return false;
-
-    std::bitset<X86::NUM_TARGET_REGS> CalleeSavedRegs;
-    const MCPhysReg *CSRegs = Fn.getRegInfo().getCalleeSavedRegs();
-    for (size_t i = 0; CSRegs[i]; i++)
-      CalleeSavedRegs.set(CSRegs[i]);
-
-    const TargetRegisterInfo *TRI = Fn.getSubtarget().getRegisterInfo();
-    for (auto &Reg : X86::GR64_NOSPRegClass.getRegisters()) {
-      // FIXME: Optimization opportunity: spill/restore a callee-saved register
-      // if a caller-saved register is unavailable.
-      if (CalleeSavedRegs.test(Reg))
-        continue;
-
-      bool Used = false;
-      for (MCSubRegIterator SR(Reg, TRI, true); SR.isValid(); ++SR)
-        if ((Used = UsedRegs.test(*SR)))
-          break;
-
-      if (!Used) {
-        LeafFuncRegister = Reg;
-        break;
-      }
-    }
-  }
-
-  const bool LeafFuncOptimization = LeafFuncRegister != X86::NoRegister;
-  if (LeafFuncOptimization)
-    // Mark the leaf function register live-in for all MBBs except the entry MBB
-    for (auto I = ++Fn.begin(), E = Fn.end(); I != E; ++I)
-      I->addLiveIn(LeafFuncRegister);
-
-  MachineBasicBlock &MBB = Fn.front();
-  const MachineBasicBlock *NonEmpty = MBB.empty() ? MBB.getFallThrough() : &MBB;
-  const DebugLoc &DL = NonEmpty->front().getDebugLoc();
-
-  const TargetInstrInfo *TII = Fn.getSubtarget().getInstrInfo();
-  if (LeafFuncOptimization)
-    addPrologLeaf(Fn, TII, MBB, DL, LeafFuncRegister);
-  else
-    addProlog(Fn, TII, MBB, DL);
-
-  MachineBasicBlock *Trap = nullptr;
-  for (auto &MBB : Fn) {
-    if (MBB.empty())
-      continue;
-
-    MachineInstr &MI = MBB.instr_back();
-    if (MI.isReturn()) {
-      if (!Trap) {
-        Trap = Fn.CreateMachineBasicBlock();
-        BuildMI(Trap, MI.getDebugLoc(), TII->get(X86::TRAP));
-        Fn.push_back(Trap);
-      }
-
-      if (LeafFuncOptimization)
-        addEpilogLeaf(TII, MBB, MI, *Trap, LeafFuncRegister);
-      else if (MI.findRegisterUseOperand(X86::R11))
-        addEpilogOnlyR10(TII, MBB, MI, *Trap);
-      else
-        addEpilog(TII, MBB, MI, *Trap);
-    }
-  }
-
-  return true;
-}
-
-INITIALIZE_PASS(ShadowCallStack, "shadow-call-stack", "Shadow Call Stack",
-                false, false)
-
-FunctionPass *llvm::createShadowCallStackPass() {
-  return new ShadowCallStack();
-}
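To make the removed pass's decision logic concrete, here is an illustrative set of C functions annotated with how runOnMachineFunction() above would have treated them; the threshold and register behavior come from the code above, while the function names and bodies are hypothetical:

    /* Compiled with -fsanitize=shadow-call-stack on x86_64 before this change. */

    /* Leaf function with at most SkipLeafInstructions (3) machine instructions:
       left uninstrumented, since the slightly racy check would cost more than it
       protects. */
    int tiny_leaf(int x) { return x + 1; }

    /* Larger leaf function: if an unused caller-saved register is found, the
       prolog is just "mov REG, [rsp]" and the epilog "cmp [rsp], REG; jne trap",
       with no gs-relative shadow call stack traffic (addPrologLeaf/addEpilogLeaf). */
    int bigger_leaf(int x) {
      int acc = 0;
      for (int i = 0; i < x; i++)
        acc += i * x;
      return acc;
    }

    /* Non-leaf function: gets the full prolog/epilog built by addProlog() and
       addEpilog() above, saving and checking the return address through the
       gs-based shadow call stack. */
    int non_leaf(int x) { return bigger_leaf(x) + tiny_leaf(x); }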
@@ -49,11 +49,6 @@ FunctionPass *createX86FloatingPointStackifierPass();
 /// transition penalty between functions encoded with AVX and SSE.
 FunctionPass *createX86IssueVZeroUpperPass();
 
-/// This pass instruments the function prolog to save the return address to a
-/// 'shadow call stack' and the function epilog to check that the return address
-/// did not change during function execution.
-FunctionPass *createShadowCallStackPass();
-
 /// This pass inserts ENDBR instructions before indirect jump/call
 /// destinations as part of CET IBT mechanism.
 FunctionPass *createX86IndirectBranchTrackingPass();
@@ -137,7 +132,6 @@ FunctionPass *createX86SpeculativeLoadHardeningPass();
 void initializeEvexToVexInstPassPass(PassRegistry &);
 void initializeFixupBWInstPassPass(PassRegistry &);
 void initializeFixupLEAPassPass(PassRegistry &);
-void initializeShadowCallStackPass(PassRegistry &);
 void initializeWinEHStatePassPass(PassRegistry &);
 void initializeX86AvoidSFBPassPass(PassRegistry &);
 void initializeX86CallFrameOptimizationPass(PassRegistry &);
@@ -69,7 +69,6 @@ extern "C" void LLVMInitializeX86Target() {
   initializeFixupBWInstPassPass(PR);
   initializeEvexToVexInstPassPass(PR);
   initializeFixupLEAPassPass(PR);
-  initializeShadowCallStackPass(PR);
   initializeX86CallFrameOptimizationPass(PR);
   initializeX86CmovConverterPassPass(PR);
   initializeX86ExecutionDomainFixPass(PR);
@@ -489,7 +488,6 @@ void X86PassConfig::addPreEmitPass() {
     addPass(createBreakFalseDeps());
   }
 
-  addPass(createShadowCallStackPass());
   addPass(createX86IndirectBranchTrackingPass());
 
   if (UseVZeroUpper)
@@ -55,7 +55,6 @@
 ; CHECK-NEXT: Post-RA pseudo instruction expansion pass
 ; CHECK-NEXT: X86 pseudo instruction expansion pass
 ; CHECK-NEXT: Analyze Machine Code For Garbage Collection
-; CHECK-NEXT: Shadow Call Stack
 ; CHECK-NEXT: X86 Indirect Branch Tracking
 ; CHECK-NEXT: X86 vzeroupper inserter
 ; CHECK-NEXT: X86 Discriminate Memory Operands
@@ -150,7 +150,6 @@
 ; CHECK-NEXT: ReachingDefAnalysis
 ; CHECK-NEXT: X86 Execution Dependency Fix
 ; CHECK-NEXT: BreakFalseDeps
-; CHECK-NEXT: Shadow Call Stack
 ; CHECK-NEXT: X86 Indirect Branch Tracking
 ; CHECK-NEXT: X86 vzeroupper inserter
 ; CHECK-NEXT: MachineDominator Tree Construction
@@ -1,212 +0,0 @@
-# RUN: llc -mtriple=x86_64-unknown-linux-gnu -run-pass shadow-call-stack -verify-machineinstrs -o - %s | FileCheck %s
---- |
-
-  define void @no_return() #0 { ret void }
-  define void @normal_return() #0 { ret void }
-  define void @normal_return_leaf_func() #0 { ret void }
-  define void @short_leaf_func() #0 { ret void }
-  define void @normal_tail_call() #0 { ret void }
-  define void @r11_tail_call() #0 { ret void }
-  define void @conditional_tail_call() #0 { ret void }
-  define void @r10_live_in() #0 { ret void }
-
-  attributes #0 = { shadowcallstack }
-
-...
----
-# CHECK-LABEL: name: no_return
-name: no_return
-tracksRegLiveness: true
-frameInfo:
-  adjustsStack: true # not a leaf function
-body: |
-  ; CHECK: bb.0:
-  bb.0:
-    ; CHECK-NEXT: $eax = MOV32ri 13
-    $eax = MOV32ri 13
-...
----
-# CHECK-LABEL: name: normal_return
-name: normal_return
-tracksRegLiveness: true
-frameInfo:
-  adjustsStack: true # not a leaf function
-body: |
-  ; CHECK: bb.0:
-  bb.0:
-    ; CHECK: $r10 = MOV64rm $rsp, 1, $noreg, 0, $noreg
-    ; CHECK-NEXT: $r11 = XOR64rr undef $r11, undef $r11, implicit-def $eflags
-    ; CHECK-NEXT: ADD64mi8 $r11, 1, $noreg, 0, $gs, 8, implicit-def $eflags
-    ; CHECK-NEXT: $r11 = MOV64rm $r11, 1, $noreg, 0, $gs
-    ; CHECK-NEXT: MOV64mr $r11, 1, $noreg, 0, $gs, $r10
-    ; CHECK-NEXT: $eax = MOV32ri 13
-    $eax = MOV32ri 13
-
-    ; CHECK-NEXT: $r11 = XOR64rr undef $r11, undef $r11, implicit-def $eflags
-    ; CHECK-NEXT: $r10 = MOV64rm $r11, 1, $noreg, 0, $gs
-    ; CHECK-NEXT: $r10 = MOV64rm $r10, 1, $noreg, 0, $gs
-    ; CHECK-NEXT: SUB64mi8 $r11, 1, $noreg, 0, $gs, 8, implicit-def $eflags
-    ; CHECK-NEXT: CMP64mr $rsp, 1, $noreg, 0, $noreg, $r10, implicit-def $eflags
-    ; CHECK-NEXT: JNE_1 %bb.1, implicit $eflags
-    ; CHECK-NEXT: RETQ $eax
-    RETQ $eax
-
-  ; CHECK: bb.1:
-  ; CHECK-NEXT; TRAP
-...
----
-# CHECK-LABEL: name: normal_return_leaf_func
-name: normal_return_leaf_func
-tracksRegLiveness: true
-frameInfo:
-  adjustsStack: false # leaf function
-body: |
-  ; CHECK: bb.0:
-  ; CHECK: liveins: $rcx
-  bb.0:
-    liveins: $rcx
-
-    ; CHECK: $rdx = MOV64rm $rsp, 1, $noreg, 0, $noreg
-    ; CHECK-NEXT: $eax = MOV32ri 0
-    $eax = MOV32ri 0
-    ; CHECK-NEXT: CMP64ri8 $rcx, 5, implicit-def $eflags
-    CMP64ri8 $rcx, 5, implicit-def $eflags
-    ; CHECK-NEXT: JA_1 %bb.1, implicit $eflags
-    JA_1 %bb.1, implicit $eflags
-    ; CHECK-NEXT: JMP_1 %bb.2
-    JMP_1 %bb.2
-
-  ; CHECK: bb.1
-  ; CHECK: liveins: $eax, $rdx
-  bb.1:
-    liveins: $eax
-
-    ; CHECKT: $eax = MOV32ri 1
-    $eax = MOV32ri 1
-
-  ; CHECK: bb.2
-  ; CHECK: liveins: $eax, $rdx
-  bb.2:
-    liveins: $eax
-
-    ; CHECK: CMP64mr $rsp, 1, $noreg, 0, $noreg, $rdx, implicit-def $eflags
-    ; CHECK-NEXT: JNE_1 %bb.3, implicit $eflags
-    ; CHECK-NEXT: RETQ $eax
-    RETQ $eax
-
-  ; CHECK: bb.3:
-  ; CHECK-NEXT; TRAP
-...
----
-# CHECK-LABEL: name: short_leaf_func
-name: short_leaf_func
-tracksRegLiveness: true
-frameInfo:
-  adjustsStack: false # leaf function
-body: |
-  ; CHECK: bb.0:
-  bb.0:
-    ; Ensure these are not counted as machine instructions
-    CFI_INSTRUCTION 0
-    CFI_INSTRUCTION 0
-    CFI_INSTRUCTION 0
-    DBG_VALUE 0
-    DBG_VALUE 0
-    DBG_VALUE 0
-
-    ; CHECK: $eax = MOV32ri 13
-    $eax = MOV32ri 13
-
-    ; CHECK-NEXT: RETQ $eax
-    RETQ $eax
-...
----
-# CHECK-LABEL: name: normal_tail_call
-name: normal_tail_call
-tracksRegLiveness: true
-frameInfo:
-  adjustsStack: true # not a leaf function
-body: |
-  ; CHECK: bb.0:
-  bb.0:
-    ; CHECK: $r10 = MOV64rm $rsp, 1, $noreg, 0, $noreg
-    ; CHECK-NEXT: $r11 = XOR64rr undef $r11, undef $r11, implicit-def $eflags
-    ; CHECK-NEXT: ADD64mi8 $r11, 1, $noreg, 0, $gs, 8, implicit-def $eflags
-    ; CHECK-NEXT: $r11 = MOV64rm $r11, 1, $noreg, 0, $gs
-    ; CHECK-NEXT: MOV64mr $r11, 1, $noreg, 0, $gs, $r10
-    ; CHECK-NEXT: $eax = MOV32ri 13
-    $eax = MOV32ri 13
-
-    ; CHECK-NEXT: $r11 = XOR64rr undef $r11, undef $r11, implicit-def $eflags
-    ; CHECK-NEXT: $r10 = MOV64rm $r11, 1, $noreg, 0, $gs
-    ; CHECK-NEXT: $r10 = MOV64rm $r10, 1, $noreg, 0, $gs
-    ; CHECK-NEXT: SUB64mi8 $r11, 1, $noreg, 0, $gs, 8, implicit-def $eflags
-    ; CHECK-NEXT: CMP64mr $rsp, 1, $noreg, 0, $noreg, $r10, implicit-def $eflags
-    ; CHECK-NEXT: JNE_1 %bb.1, implicit $eflags
-    ; CHECK-NEXT: TAILJMPr64 $rax
-    TAILJMPr64 $rax
-
-  ; CHECK: bb.1:
-  ; CHECK-NEXT; TRAP
-...
----
-# CHECK-LABEL: name: r11_tail_call
-name: r11_tail_call
-tracksRegLiveness: true
-frameInfo:
-  adjustsStack: true # not a leaf function
-body: |
-  ; CHECK: bb.0:
-  bb.0:
-    ; CHECK: $r10 = MOV64rm $rsp, 1, $noreg, 0, $noreg
-    ; CHECK-NEXT: $r11 = XOR64rr undef $r11, undef $r11, implicit-def $eflags
-    ; CHECK-NEXT: ADD64mi8 $r11, 1, $noreg, 0, $gs, 8, implicit-def $eflags
-    ; CHECK-NEXT: $r11 = MOV64rm $r11, 1, $noreg, 0, $gs
-    ; CHECK-NEXT: MOV64mr $r11, 1, $noreg, 0, $gs, $r10
-    ; CHECK-NEXT: $eax = MOV32ri 13
-    $eax = MOV32ri 13
-
-    ; CHECK-NEXT: $r10 = XOR64rr undef $r10, undef $r10, implicit-def $eflags
-    ; CHECK-NEXT: $r10 = MOV64rm $r10, 1, $noreg, 0, $gs
-    ; CHECK-NEXT: $r10 = MOV64rm $r10, 1, $noreg, 0, $gs
-    ; CHECK-NEXT: SUB64mi8 $noreg, 1, $noreg, 0, $gs, 8, implicit-def $eflags
-    ; CHECK-NEXT: CMP64mr $rsp, 1, $noreg, 0, $noreg, $r10, implicit-def $eflags
-    ; CHECK-NEXT: JNE_1 %bb.1, implicit $eflags
-    ; CHECK-NEXT: TAILJMPr64 undef $r11
-    TAILJMPr64 undef $r11
-
-  ; CHECK: bb.1:
-  ; CHECK-NEXT; TRAP
-...
----
-# CHECK-LABEL: name: conditional_tail_call
-name: conditional_tail_call
-tracksRegLiveness: true
-frameInfo:
-  adjustsStack: true # not a leaf function
-body: |
-  ; CHECK: bb.0:
-  bb.0:
-    ; CHECK: $eax = MOV32ri 13
-    $eax = MOV32ri 13
-
-    ; CHECK-NEXT: TAILJMPd64_CC @conditional_tail_call, undef $eflags
-    TAILJMPd64_CC @conditional_tail_call, undef $eflags
-...
----
-# CHECK-LABEL: name: r10_live_in
-name: r10_live_in
-tracksRegLiveness: true
-frameInfo:
-  adjustsStack: true # not a leaf function
-body: |
-  ; CHECK: bb.0:
-  ; CHECK: liveins: $r10
-  bb.0:
-    liveins: $r10
-
-    ; CHECK: $eax = MOV32ri 13
-    $eax = MOV32ri 13
-    ; CHECK-NEXT: RETQ $eax
-    RETQ $eax
-...
@@ -76,7 +76,6 @@ static_library("LLVMX86CodeGen") {
     deps += [ ":X86GenFoldTables" ]
   }
   sources = [
-    "ShadowCallStack.cpp",
    "X86AsmPrinter.cpp",
    "X86AvoidStoreForwardingBlocks.cpp",
    "X86CallFrameOptimization.cpp",