[AMDGPU] Increase max iterations count to analyze complete unroll
Summary:
In some cases inner loops may not get boosts, so try to analyze them deeper.

Reviewers: rampitec, mzolotukhin

Reviewed By: rampitec

Subscribers: arsenm, kzhuravl, jvesely, wdng, nhaehnle, yaxunl, dstuttard, tpr, t-tye, hiraditya, zzheng, kerbowa, llvm-commits

Tags: #llvm

Differential Revision: https://reviews.llvm.org/D81204
This commit is contained in:
parent c66ff992b5
commit cf9aab289e
llvm/include/llvm/Analysis/TargetTransformInfo.h
@@ -503,6 +503,9 @@ public:
     /// This value is used in the same manner to limit the size of the inner
     /// loop.
     unsigned UnrollAndJamInnerLoopThreshold;
+    /// Don't allow loop unrolling to simulate more than this number of
+    /// iterations when checking full unroll profitability
+    unsigned MaxIterationsCountToAnalyze;
   };
 
   /// Get target-customized preferences for the generic loop unrolling
llvm/lib/Target/AMDGPU/AMDGPUTargetTransformInfo.cpp
@@ -79,6 +79,11 @@ static cl::opt<bool> UseLegacyDA(
     cl::desc("Enable legacy divergence analysis for AMDGPU"),
     cl::init(false), cl::Hidden);
 
+static cl::opt<unsigned> UnrollMaxBlockToAnalyze(
+    "amdgpu-unroll-max-block-to-analyze",
+    cl::desc("Inner loop block size threshold to analyze in unroll for AMDGPU"),
+    cl::init(20), cl::Hidden);
+
 static bool dependsOnLocalPhi(const Loop *L, const Value *Cond,
                               unsigned Depth = 0) {
   const Instruction *I = dyn_cast<Instruction>(Cond);
@@ -223,6 +228,11 @@ void AMDGPUTTIImpl::getUnrollingPreferences(Loop *L, ScalarEvolution &SE,
       if (UP.Threshold >= MaxBoost)
         return;
     }
+
+    // If we got a GEP in a small BB from inner loop then increase max trip
+    // count to analyze for better estimation cost in unroll
+    if (L->empty() && BB->size() < UnrollMaxBlockToAnalyze)
+      UP.MaxIterationsCountToAnalyze = 32;
   }
 }
 
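The AMDGPU hunk above is the first user of the new per-loop knob. For reference, a minimal sketch of how another backend could do the same from its own TTI hook; MyTTIImpl is a hypothetical class name, not part of this commit, and the class declaration and includes are omitted:

// Hypothetical example (MyTTIImpl is an assumed name): raise the simulated
// trip-count cap only for innermost loops, similar to what the AMDGPU
// heuristic above does for small blocks containing a GEP.
void MyTTIImpl::getUnrollingPreferences(Loop *L, ScalarEvolution &SE,
                                        TTI::UnrollingPreferences &UP) {
  BaseT::getUnrollingPreferences(L, SE, UP); // start from the generic defaults
  if (L->empty())                            // no subloops, i.e. innermost
    UP.MaxIterationsCountToAnalyze = 32;     // allow deeper full-unroll analysis
}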
llvm/lib/Transforms/Scalar/LoopUnrollPass.cpp
@@ -223,6 +223,7 @@ TargetTransformInfo::UnrollingPreferences llvm::gatherUnrollingPreferences(
   UP.UnrollAndJam = false;
   UP.PeelProfiledIterations = true;
   UP.UnrollAndJamInnerLoopThreshold = 60;
+  UP.MaxIterationsCountToAnalyze = UnrollMaxIterationsCountToAnalyze;
 
   // Override with any target specific settings
   TTI.getUnrollingPreferences(L, SE, UP);
@@ -264,6 +265,8 @@ TargetTransformInfo::UnrollingPreferences llvm::gatherUnrollingPreferences(
   UP.AllowLoopNestsPeeling = UnrollAllowLoopNestsPeeling;
   if (UnrollUnrollRemainder.getNumOccurrences() > 0)
     UP.UnrollRemainder = UnrollUnrollRemainder;
+  if (UnrollMaxIterationsCountToAnalyze.getNumOccurrences() > 0)
+    UP.MaxIterationsCountToAnalyze = UnrollMaxIterationsCountToAnalyze;
 
   // Apply user values provided by argument
   if (UserThreshold.hasValue()) {
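Note the ordering in gatherUnrollingPreferences: the cl::opt default seeds UP.MaxIterationsCountToAnalyze, TTI.getUnrollingPreferences may then override it, and a value explicitly passed on the command line is re-applied last and therefore wins. A small standalone sketch of that precedence, using assumed names rather than the pass's own variables:

#include <optional>

// Illustrative only: mirrors the assignment order in gatherUnrollingPreferences.
unsigned effectiveMaxIterationsCountToAnalyze(
    unsigned FlagDefault,                    // cl::opt default value
    std::optional<unsigned> TargetOverride,  // set by TTI.getUnrollingPreferences
    std::optional<unsigned> ExplicitFlag) {  // value given on the command line
  unsigned Max = FlagDefault;
  if (TargetOverride)
    Max = *TargetOverride;  // target-specific settings override the default
  if (ExplicitFlag)
    Max = *ExplicitFlag;    // an explicit flag still wins over the target
  return Max;
}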
@@ -353,11 +356,12 @@ struct EstimatedUnrollCost {
 static Optional<EstimatedUnrollCost> analyzeLoopUnrollCost(
     const Loop *L, unsigned TripCount, DominatorTree &DT, ScalarEvolution &SE,
     const SmallPtrSetImpl<const Value *> &EphValues,
-    const TargetTransformInfo &TTI, unsigned MaxUnrolledLoopSize) {
+    const TargetTransformInfo &TTI, unsigned MaxUnrolledLoopSize,
+    unsigned MaxIterationsCountToAnalyze) {
   // We want to be able to scale offsets by the trip count and add more offsets
   // to them without checking for overflows, and we already don't want to
   // analyze *massive* trip counts, so we force the max to be reasonably small.
-  assert(UnrollMaxIterationsCountToAnalyze <
+  assert(MaxIterationsCountToAnalyze <
              (unsigned)(std::numeric_limits<int>::max() / 2) &&
          "The unroll iterations max is too large!");
 
@@ -367,8 +371,7 @@ static Optional<EstimatedUnrollCost> analyzeLoopUnrollCost(
     return None;
 
   // Don't simulate loops with a big or unknown tripcount
-  if (!UnrollMaxIterationsCountToAnalyze || !TripCount ||
-      TripCount > UnrollMaxIterationsCountToAnalyze)
+  if (!TripCount || TripCount > MaxIterationsCountToAnalyze)
     return None;
 
   SmallSetVector<BasicBlock *, 16> BBWorklist;
@@ -845,7 +848,8 @@ bool llvm::computeUnrollCount(
       // To check that, run additional analysis on the loop.
      if (Optional<EstimatedUnrollCost> Cost = analyzeLoopUnrollCost(
              L, FullUnrollTripCount, DT, SE, EphValues, TTI,
-              UP.Threshold * UP.MaxPercentThresholdBoost / 100)) {
+              UP.Threshold * UP.MaxPercentThresholdBoost / 100,
+              UP.MaxIterationsCountToAnalyze)) {
        unsigned Boost =
            getFullUnrollBoostingFactor(*Cost, UP.MaxPercentThresholdBoost);
        if (Cost->UnrolledCost < UP.Threshold * Boost / 100) {
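The net effect inside analyzeLoopUnrollCost is that the trip-count guard now uses the per-loop preference instead of the global flag. A trivial restatement of the new guard, with an assumed helper name for illustration only:

// Assumed helper name, not in the pass: simulation is still skipped for an
// unknown (zero) trip count or one above the per-loop cap.
static bool worthSimulatingFullUnroll(unsigned TripCount,
                                      unsigned MaxIterationsCountToAnalyze) {
  return TripCount != 0 && TripCount <= MaxIterationsCountToAnalyze;
}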
New AMDGPU loop-unroll test (file added by this commit):
@@ -0,0 +1,49 @@
+; RUN: opt -S -mtriple=amdgcn-unknown-amdhsa -loop-unroll -unroll-threshold=150 < %s | FileCheck %s
+
+; Test that max iterations count to analyze (specific for the target)
+; is enough to make the inner loop completely unrolled
+define hidden void @foo(float addrspace(1)* %ptrG, float addrspace(3)* %ptrL, i32 %A, i32 %A2, i32 %M) {
+bb:
+  br label %bb2
+
+bb2:                                              ; preds = %bb7, %bb
+  %i = phi i32 [ 0, %bb ], [ %i8, %bb7 ]
+  br label %bb4
+
+bb3:                                              ; preds = %bb7
+  ret void
+
+bb4:                                              ; preds = %bb10, %bb2
+  %i5 = phi i32 [ 0, %bb2 ], [ %i11, %bb10 ]
+  %i6 = add nuw nsw i32 %i5, %i
+  br label %for.body
+
+bb7:                                              ; preds = %bb10
+  %i8 = add nuw nsw i32 %i, 1
+  %i9 = icmp eq i32 %i8, 8
+  br i1 %i9, label %bb3, label %bb2
+
+bb10:                                             ; preds = %for.body
+  %i11 = add nuw nsw i32 %i5, 1
+  %cmpj = icmp ult i32 %i11, 8
+  br i1 %cmpj, label %bb7, label %bb4
+
+; CHECK: for.body:
+; CHECK-NOT: %phi = phi {{.*}}
+for.body:                                         ; preds = %for.body, %bb4
+  %phi = phi i32 [ 0, %bb4 ], [ %k, %for.body ]
+  %mul = shl nuw nsw i32 %phi, 5
+  %add1 = add i32 %A, %mul
+  %add2 = add i32 %add1, %M
+  %arrayidx = getelementptr inbounds float, float addrspace(3)* %ptrL, i32 %add2
+  %bc = bitcast float addrspace(3)* %arrayidx to i32 addrspace(3)*
+  %ld = load i32, i32 addrspace(3)* %bc, align 4
+  %mul2 = shl nuw nsw i32 %phi, 3
+  %add3 = add nuw nsw i32 %mul2, %A2
+  %arrayidx2 = getelementptr inbounds float, float addrspace(1)* %ptrG, i32 %add3
+  %bc2 = bitcast float addrspace(1)* %arrayidx2 to i32 addrspace(1)*
+  store i32 %ld, i32 addrspace(1)* %bc2, align 4
+  %k = add nuw nsw i32 %phi, 1
+  %cmpk = icmp ult i32 %k, 32
+  br i1 %cmpk, label %for.body, label %bb10
+}