[GVN] Small tweaks to comments, style, and missed vector handling
Noticed these while doing a final sweep of the code to make sure I hadn't missed anything in my last couple of patches. The (minor) missed optimization was a byproduct of the stylistic fix to avoid an overly specific cast.

llvm-svn: 354412
parent 88f7218b05
commit 6b9bc58980
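The "overly specific cast" is the key to the missed vector handling: dyn_cast<ConstantInt> only matches scalar integer constants, so a vector zeroinitializer (represented as ConstantAggregateZero) failed the old check even though its bit pattern is all zeros. Below is a minimal standalone sketch of the difference, not code from the commit; it assumes LLVM headers of roughly this commit's vintage and a build along the lines of `clang++ demo.cpp $(llvm-config --cxxflags --ldflags --libs core)`.

#include "llvm/IR/Constants.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

int main() {
  LLVMContext Ctx;
  // <2 x i32> zeroinitializer -- the constant stored in forward_store_zero2 below.
  Constant *Zero = Constant::getNullValue(VectorType::get(Type::getInt32Ty(Ctx), 2));

  // Old check: only a scalar ConstantInt qualifies, so the vector zero is missed.
  bool OldCheck = isa<ConstantInt>(Zero) && cast<ConstantInt>(Zero)->isZero();
  // New check: any constant equal to its type's null value qualifies.
  bool NewCheck = Zero->isNullValue();

  outs() << "old check: " << (OldCheck ? "forwards" : "misses") << "\n"; // misses
  outs() << "new check: " << (NewCheck ? "forwards" : "misses") << "\n"; // forwards
  return 0;
}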
--- a/lib/Transforms/Utils/VNCoercion.cpp
+++ b/lib/Transforms/Utils/VNCoercion.cpp
@@ -36,8 +36,8 @@ bool canCoerceMustAliasedValueToLoad(Value *StoredVal, Type *LoadTy,
     // As a special case, allow coercion of memset used to initialize
     // an array w/null.  Despite non-integral pointers not generally having a
     // specific bit pattern, we do assume null is zero.
-    if (auto *CI = dyn_cast<ConstantInt>(StoredVal))
-      return CI->isZero();
+    if (auto *CI = dyn_cast<Constant>(StoredVal))
+      return CI->isNullValue();
     return false;
   }
 
@@ -287,9 +287,8 @@ int analyzeLoadFromClobberingMemInst(Type *LoadTy, Value *LoadPtr,
   // If this is memset, we just need to see if the offset is valid in the size
   // of the memset..
   if (MI->getIntrinsicID() == Intrinsic::memset) {
-    Value *StoredVal = cast<MemSetInst>(MI)->getValue();
     if (DL.isNonIntegralPointerType(LoadTy->getScalarType())) {
-      auto *CI = dyn_cast<ConstantInt>(StoredVal);
+      auto *CI = dyn_cast<ConstantInt>(cast<MemSetInst>(MI)->getValue());
       if (!CI || !CI->isZero())
         return -1;
     }
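Note that the ConstantInt cast is still the right one in this hunk, unlike in the first: MemSetInst::getValue() returns memset's value operand, which is always a scalar i8, so there is no vector constant for the narrower cast to miss.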
@@ -316,7 +315,8 @@ int analyzeLoadFromClobberingMemInst(Type *LoadTy, Value *LoadPtr,
   if (Offset == -1)
     return Offset;
 
-  // Don't coerce non-integral pointers to integers or vice versa.
+  // Don't coerce non-integral pointers to integers or vice versa, and the
+  // memtransfer is implicitly a raw byte code
   if (DL.isNonIntegralPointerType(LoadTy->getScalarType()))
     // TODO: Can allow nullptrs from constant zeros
     return -1;
 
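Whether a load type trips this guard is a property of the module's datalayout: the test file below marks address space 4 as non-integral with an `ni:4` datalayout component, and because the guard asks about LoadTy->getScalarType(), vector-of-pointer loads hit it the same way scalar pointer loads do. A small sketch of just that predicate, again assuming headers of roughly this commit's vintage (the `e-ni:4` layout string here is only an illustration):

#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

int main() {
  LLVMContext Ctx;
  DataLayout DL("e-ni:4"); // "ni:4" declares addrspace(4) pointers non-integral
  Type *P4 = PointerType::get(Type::getInt8Ty(Ctx), 4); // i8 addrspace(4)*
  Type *V1P4 = VectorType::get(P4, 1);                  // <1 x i8 addrspace(4)*>

  // Both forms reduce to the same scalar pointer type, so both are rejected
  // for memtransfer forwarding by the guard above.
  outs() << "scalar load: "
         << (DL.isNonIntegralPointerType(P4->getScalarType()) ? "reject" : "ok")
         << "\n";
  outs() << "vector load: "
         << (DL.isNonIntegralPointerType(V1P4->getScalarType()) ? "reject" : "ok")
         << "\n";
  return 0;
}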
--- a/test/Transforms/GVN/non-integral-pointers.ll
+++ b/test/Transforms/GVN/non-integral-pointers.ll
@@ -139,18 +139,32 @@ define <1 x i8 addrspace(4)*> @neg_forward_store_vload(<1 x i8 addrspace(4)*> ad
   ret <1 x i8 addrspace(4)*> %ref
 }
 
-; TODO: missed optimization, we can forward the null.
+; Nulls have known bit patterns, so we can forward
 define i8 addrspace(4)* @forward_store_zero(i8 addrspace(4)* addrspace(4)* %loc) {
 ; CHECK-LABEL: @forward_store_zero(
 ; CHECK-NEXT:  entry:
 ; CHECK-NEXT:    [[LOC_BC:%.*]] = bitcast i8 addrspace(4)* addrspace(4)* [[LOC:%.*]] to i64 addrspace(4)*
-; CHECK-NEXT:    store i64 5, i64 addrspace(4)* [[LOC_BC]]
-; CHECK-NEXT:    [[REF:%.*]] = load i8 addrspace(4)*, i8 addrspace(4)* addrspace(4)* [[LOC]]
-; CHECK-NEXT:    ret i8 addrspace(4)* [[REF]]
+; CHECK-NEXT:    store i64 0, i64 addrspace(4)* [[LOC_BC]]
+; CHECK-NEXT:    ret i8 addrspace(4)* null
 ;
 entry:
   %loc.bc = bitcast i8 addrspace(4)* addrspace(4)* %loc to i64 addrspace(4)*
-  store i64 5, i64 addrspace(4)* %loc.bc
+  store i64 0, i64 addrspace(4)* %loc.bc
   %ref = load i8 addrspace(4)*, i8 addrspace(4)* addrspace(4)* %loc
   ret i8 addrspace(4)* %ref
 }
+
+; Nulls have known bit patterns, so we can forward
+define i8 addrspace(4)* @forward_store_zero2(i8 addrspace(4)* addrspace(4)* %loc) {
+; CHECK-LABEL: @forward_store_zero2(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[LOC_BC:%.*]] = bitcast i8 addrspace(4)* addrspace(4)* [[LOC:%.*]] to <2 x i32> addrspace(4)*
+; CHECK-NEXT:    store <2 x i32> zeroinitializer, <2 x i32> addrspace(4)* [[LOC_BC]]
+; CHECK-NEXT:    ret i8 addrspace(4)* null
+;
+entry:
+  %loc.bc = bitcast i8 addrspace(4)* addrspace(4)* %loc to <2 x i32> addrspace(4)*
+  store <2 x i32> zeroinitializer, <2 x i32> addrspace(4)* %loc.bc
+  %ref = load i8 addrspace(4)*, i8 addrspace(4)* addrspace(4)* %loc
+  ret i8 addrspace(4)* %ref
+}
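These assertions follow the auto-generated FileCheck style; utils/update_test_checks.py is the usual way to regenerate them. To reproduce by hand, a run along the lines of `opt -gvn -S test/Transforms/GVN/non-integral-pointers.ll | FileCheck test/Transforms/GVN/non-integral-pointers.ll` (the test's own RUN line is authoritative) should show both loads now folded to `null`.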