mirror of
https://github.com/RPCS3/llvm-mirror.git
synced 2024-11-28 22:00:30 +00:00
[Loads/SROA] Remove blatantly incorrect code and fix a bug revealed in the process
The code we had in isSafeToLoadUnconditionally was blatantly wrong. This function takes a "Size" argument which is supposed to describe the span loaded from. Instead, the code uses the size of the pointer passed (which may be unrelated!) and only checks that span. For any Size > LoadSize, this can and does lead to miscompiles. Worse, the generic code just a few lines above correctly handles the cases which *are* valid. So, let's delete said code. Removing this code revealed two issues: 1) As noted by jdoerfert the removed code incorrectly handled external globals. The test update in SROA is to stop testing incorrect behavior. 2) SROA was confusing bytes and bits, but this wasn't obvious as the Size parameter was being essentially ignored anyway. Fixed. Differential Revision: https://reviews.llvm.org/D66778 llvm-svn: 370102
This commit is contained in:
parent
d565c170bf
commit
661116b15b
@ -210,51 +210,13 @@ bool llvm::isSafeToLoadUnconditionally(Value *V, unsigned Align, APInt &Size,
|
||||
if (isDereferenceableAndAlignedPointer(V, Align, Size, DL, CtxI, DT))
|
||||
return true;
|
||||
|
||||
int64_t ByteOffset = 0;
|
||||
Value *Base = V;
|
||||
Base = GetPointerBaseWithConstantOffset(V, ByteOffset, DL);
|
||||
|
||||
if (ByteOffset < 0) // out of bounds
|
||||
return false;
|
||||
|
||||
Type *BaseType = nullptr;
|
||||
unsigned BaseAlign = 0;
|
||||
if (const AllocaInst *AI = dyn_cast<AllocaInst>(Base)) {
|
||||
// An alloca is safe to load from as load as it is suitably aligned.
|
||||
BaseType = AI->getAllocatedType();
|
||||
BaseAlign = AI->getAlignment();
|
||||
} else if (const GlobalVariable *GV = dyn_cast<GlobalVariable>(Base)) {
|
||||
// Global variables are not necessarily safe to load from if they are
|
||||
// interposed arbitrarily. Their size may change or they may be weak and
|
||||
// require a test to determine if they were in fact provided.
|
||||
if (!GV->isInterposable()) {
|
||||
BaseType = GV->getType()->getElementType();
|
||||
BaseAlign = GV->getAlignment();
|
||||
}
|
||||
}
|
||||
|
||||
PointerType *AddrTy = cast<PointerType>(V->getType());
|
||||
uint64_t LoadSize = DL.getTypeStoreSize(AddrTy->getElementType());
|
||||
|
||||
// If we found a base allocated type from either an alloca or global variable,
|
||||
// try to see if we are definitively within the allocated region. We need to
|
||||
// know the size of the base type and the loaded type to do anything in this
|
||||
// case.
|
||||
if (BaseType && BaseType->isSized()) {
|
||||
if (BaseAlign == 0)
|
||||
BaseAlign = DL.getPrefTypeAlignment(BaseType);
|
||||
|
||||
if (Align <= BaseAlign) {
|
||||
// Check if the load is within the bounds of the underlying object.
|
||||
if (ByteOffset + LoadSize <= DL.getTypeAllocSize(BaseType) &&
|
||||
((ByteOffset % Align) == 0))
|
||||
return true;
|
||||
}
|
||||
}
|
||||
|
||||
if (!ScanFrom)
|
||||
return false;
|
||||
|
||||
if (Size.getBitWidth() > 64)
|
||||
return false;
|
||||
const uint64_t LoadSize = Size.getZExtValue();
|
||||
|
||||
// Otherwise, be a little bit aggressive by scanning the local block where we
|
||||
// want to check to see if the pointer is already being loaded or stored
|
||||
// from/to. If so, the previous load or store would have already trapped,
|
||||
@ -302,7 +264,8 @@ bool llvm::isSafeToLoadUnconditionally(Value *V, unsigned Align, APInt &Size,
|
||||
continue;
|
||||
|
||||
// Handle trivial cases.
|
||||
if (AccessedPtr == V)
|
||||
if (AccessedPtr == V &&
|
||||
LoadSize <= DL.getTypeStoreSize(AccessedTy))
|
||||
return true;
|
||||
|
||||
if (AreEquivalentAddressValues(AccessedPtr->stripPointerCasts(), V) &&
|
||||
|
@ -1218,7 +1218,7 @@ static bool isSafePHIToSpeculate(PHINode &PN) {
|
||||
if (BBI->mayWriteToMemory())
|
||||
return false;
|
||||
|
||||
uint64_t Size = DL.getTypeStoreSizeInBits(LI->getType());
|
||||
uint64_t Size = DL.getTypeStoreSize(LI->getType());
|
||||
MaxAlign = std::max(MaxAlign, LI->getAlignment());
|
||||
MaxSize = MaxSize.ult(Size) ? APInt(APWidth, Size) : MaxSize;
|
||||
HaveLoad = true;
|
||||
|
@ -282,7 +282,9 @@ define void @select_addrspacecast_const_op(i1 %a, i1 %b) {
|
||||
ret void
|
||||
}
|
||||
|
||||
@gv = external addrspace(1) global i64
|
||||
;; If this was external, we wouldn't be able to prove dereferenceability
|
||||
;; of the location.
|
||||
@gv = addrspace(1) global i64 zeroinitializer
|
||||
|
||||
define void @select_addrspacecast_gv(i1 %a, i1 %b) {
|
||||
; CHECK-LABEL: @select_addrspacecast_gv(
|
||||
@ -299,10 +301,11 @@ define void @select_addrspacecast_gv(i1 %a, i1 %b) {
|
||||
ret void
|
||||
}
|
||||
|
||||
; CHECK-LABEL: @select_addrspacecast_i8(
|
||||
; CHECK: [[SEL:%.*]] = select i1 undef, i8 undef, i8 undef
|
||||
; CHECK-NEXT: ret i8 [[SEL]]
|
||||
define i8 @select_addrspacecast_i8() {
|
||||
; CHECK-LABEL: @select_addrspacecast_i8(
|
||||
; CHECK-NEXT: [[RET_SROA_SPECULATED:%.*]] = select i1 undef, i8 undef, i8 undef
|
||||
; CHECK-NEXT: ret i8 [[RET_SROA_SPECULATED]]
|
||||
;
|
||||
%a = alloca i8
|
||||
%b = alloca i8
|
||||
|
||||
|
Loading…
Reference in New Issue
Block a user