[GSoC] Add PolyhedralInfo pass - new interface to polly analysis

Adding a new pass PolyhedralInfo. This pass will be the interface to Polly.
  Initially, we will provide the following interface:
    - isParallel(Loop *L) - return a bool depending on whether the loop is
                             parallel or not for the given program order.

Patch by Utpal Bora <cs14mtech11017@iith.ac.in>

Differential Revision: https://reviews.llvm.org/D21486

llvm-svn: 276637
This commit is contained in:
Johannes Doerfert 2016-07-25 12:48:45 +00:00
parent 13c78e4d51
commit 8031238017
24 changed files with 349 additions and 16 deletions

View File

@ -37,6 +37,7 @@ llvm::Pass *createDOTViewerPass();
llvm::Pass *createJSONExporterPass();
llvm::Pass *createJSONImporterPass();
llvm::Pass *createPollyCanonicalizePass();
llvm::Pass *createPolyhedralInfoPass();
llvm::Pass *createScopDetectionPass();
llvm::Pass *createScopInfoRegionPassPass();
llvm::Pass *createScopInfoWrapperPassPass();
@ -72,6 +73,7 @@ struct PollyForcePassLinking {
polly::createScopDetectionPass();
polly::createScopInfoRegionPassPass();
polly::createPollyCanonicalizePass();
polly::createPolyhedralInfoPass();
polly::createIslAstInfoPass();
polly::createCodeGenerationPass();
#ifdef GPU_CODEGEN

View File

@ -0,0 +1,101 @@
//===- polly/PolyhedralInfo.h - PolyhedralInfo class definition -*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
///
/// This file contains the declaration of the PolyhedralInfo class, which will
/// provide an interface to expose polyhedral analysis information of Polly.
///
/// This is work in progress. We will add more APIs as and when deemed required.
//===----------------------------------------------------------------------===//
#ifndef POLLY_POLYHEDRAL_INFO_H
#define POLLY_POLYHEDRAL_INFO_H
#include "llvm/Pass.h"
#include "isl/ctx.h"
#include "isl/union_map.h"
namespace llvm {
class Loop;
}
namespace polly {
class Scop;
class ScopInfoWrapperPass;
class DependenceInfoWrapperPass;
/// @brief Function pass exposing Polly's polyhedral analysis results.
///
/// This pass queries ScopInfoWrapperPass and DependenceInfoWrapperPass and
/// answers loop-level questions (currently only parallelism) for client
/// transformation passes. It performs no IR modification itself.
class PolyhedralInfo : public llvm::FunctionPass {
public:
  static char ID; // Pass identification, replacement for typeid

  /// @brief Construct a new PolyhedralInfo pass.
  PolyhedralInfo() : FunctionPass(ID) {}
  ~PolyhedralInfo() {}

  /// @brief Check if a given loop is parallel.
  ///
  /// @param L The loop.
  ///
  /// @return Returns true if the loop is parallel, false otherwise.
  bool isParallel(llvm::Loop *L) const;

  /// @brief Return the SCoP containing the @p L loop.
  ///
  /// @param L The loop.
  ///
  /// @return Returns the SCoP containing the given loop.
  ///         Returns null if the loop is not contained in any SCoP.
  const Scop *getScopContainingLoop(llvm::Loop *L) const;

  /// @brief Computes the partial schedule for the given @p L loop.
  ///
  /// @param S The SCoP containing the given loop.
  /// @param L The loop.
  ///
  /// @return Returns the partial schedule for the given loop.
  ///         Ownership of the returned isl_union_map passes to the caller
  ///         (__isl_give).
  __isl_give isl_union_map *getScheduleForLoop(const Scop *S,
                                               llvm::Loop *L) const;

  /// @brief Get the SCoP and dependence analysis information for @p F.
  bool runOnFunction(llvm::Function &F) override;

  /// @brief Release the internal memory.
  void releaseMemory() override {}

  /// @brief Print to @p OS if each dimension of a loop nest is parallel or not.
  void print(llvm::raw_ostream &OS,
             const llvm::Module *M = nullptr) const override;

  /// @brief Register all analyses and transformation required.
  void getAnalysisUsage(llvm::AnalysisUsage &AU) const override;

private:
  /// @brief Check if a given loop is parallel or vectorizable.
  ///
  /// @param L             The loop.
  /// @param MinDepDistPtr If not nullptr, the minimal dependence distance will
  ///                      be returned at the address of that pointer.
  ///
  /// @return Returns true if loop is parallel or vectorizable, false
  ///         otherwise.
  bool checkParallel(llvm::Loop *L,
                     __isl_give isl_pw_aff **MinDepDistPtr = nullptr) const;

  // Cached analysis results, set in runOnFunction (non-owning).
  ScopInfoWrapperPass *SI;
  DependenceInfoWrapperPass *DI;
};
} // end namespace polly
namespace llvm {
class PassRegistry;
void initializePolyhedralInfoPass(llvm::PassRegistry &);
}
#endif

View File

@ -0,0 +1,162 @@
//===-------- PolyhedralInfo.cpp - Interface to polyhedral analysis -------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
///
/// An interface to the Polyhedral analysis engine(Polly) of LLVM.
///
/// This pass provides an interface to the polyhedral analysis performed by
/// Polly.
///
/// This interface provides basic interface like isParallel, isVectorizable
/// that can be used in LLVM transformation passes.
///
/// Work in progress, this file is subject to change.
//===----------------------------------------------------------------------===//
#include "polly/PolyhedralInfo.h"
#include "polly/DependenceInfo.h"
#include "polly/LinkAllPasses.h"
#include "polly/Options.h"
#include "polly/ScopInfo.h"
#include "polly/Support/GICHelper.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Support/Debug.h"
#include <isl/map.h>
#include <isl/union_map.h>
using namespace llvm;
using namespace polly;
#define DEBUG_TYPE "polyhedral-info"
// Command-line switch: when set, PolyhedralInfo::print reports for every loop
// whether it is parallel.
static cl::opt<bool> CheckParallel("polly-check-parallel",
                                   cl::desc("Check for parallel loops"),
                                   cl::Hidden, cl::init(false), cl::ZeroOrMore,
                                   cl::cat(PollyCategory));

// Command-line switch for vectorizability reporting.
// NOTE(review): declared but not yet consulted anywhere visible in this file;
// presumably reserved for a follow-up that prints vectorizability.
static cl::opt<bool> CheckVectorizable("polly-check-vectorizable",
                                       cl::desc("Check for vectorizable loops"),
                                       cl::Hidden, cl::init(false),
                                       cl::ZeroOrMore, cl::cat(PollyCategory));
/// Declare the analyses this pass consumes. The pass is read-only, so all
/// analyses are preserved.
void PolyhedralInfo::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.setPreservesAll();
  AU.addRequired<LoopInfoWrapperPass>();
  AU.addRequiredTransitive<ScopInfoWrapperPass>();
  AU.addRequiredTransitive<DependenceInfoWrapperPass>();
}
/// Cache the SCoP and dependence analyses for @p F; the IR is not modified.
bool PolyhedralInfo::runOnFunction(Function &F) {
  SI = &getAnalysis<ScopInfoWrapperPass>();
  DI = &getAnalysis<DependenceInfoWrapperPass>();
  return false;
}
/// Walk every loop of the function in depth-first order and, when
/// -polly-check-parallel is set, report whether each loop is parallel.
void PolyhedralInfo::print(raw_ostream &OS, const Module *) const {
  auto &LI = getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
  for (auto *TopLevelLoop : LI)
    for (auto *L : depth_first(TopLevelLoop)) {
      OS.indent(2) << L->getHeader()->getName() << ":\t";
      if (CheckParallel)
        OS << (isParallel(L) ? "Loop is parallel.\n"
                             : "Loop is not parallel.\n");
    }
}
// Decide whether @p L carries no loop-carried dependences, i.e. is parallel.
// Optionally reports the minimal dependence distance through MinDepDistPtr.
bool PolyhedralInfo::checkParallel(Loop *L, isl_pw_aff **MinDepDistPtr) const {
  bool IsParallel;
  // Only loops fully contained in a SCoP can be analyzed.
  const Scop *S = getScopContainingLoop(L);
  if (!S)
    return false;
  // Request access-level dependences for the containing SCoP.
  const Dependences &D =
      DI->getDependences(const_cast<Scop *>(S), Dependences::AL_Access);
  // Bail out conservatively if dependence computation did not succeed.
  if (!D.hasValidDependences())
    return false;
  DEBUG(dbgs() << "Loop :\t" << L->getHeader()->getName() << ":\n");
  // Combine all dependence kinds (flow, output, anti, reduction) into one map.
  // NOTE(review): Deps is presumably consumed (__isl_take) by D.isParallel
  // below, since only Schedule is freed explicitly here — confirm against the
  // Dependences::isParallel declaration.
  isl_union_map *Deps =
      D.getDependences(Dependences::TYPE_RAW | Dependences::TYPE_WAW |
                       Dependences::TYPE_WAR | Dependences::TYPE_RED);
  DEBUG(dbgs() << "Dependences :\t" << stringFromIslObj(Deps) << "\n");
  // Partial schedule of the SCoP restricted to @p L (owned by us).
  isl_union_map *Schedule = getScheduleForLoop(S, L);
  DEBUG(dbgs() << "Schedule: \t" << stringFromIslObj(Schedule) << "\n");
  IsParallel = D.isParallel(Schedule, Deps, MinDepDistPtr);
  isl_union_map_free(Schedule);
  return IsParallel;
}
/// A loop is parallel iff checkParallel succeeds; the minimal dependence
/// distance is not needed here, so no output pointer is passed.
bool PolyhedralInfo::isParallel(Loop *L) const {
  return checkParallel(L, nullptr);
}
/// Find the SCoP whose region contains @p L, or nullptr if none does.
const Scop *PolyhedralInfo::getScopContainingLoop(Loop *L) const {
  assert((SI) && "ScopInfoWrapperPass is required by PolyhedralInfo pass!\n");
  for (auto &RegionToScop : *SI)
    if (RegionToScop.first->contains(L))
      return RegionToScop.second.get();
  return nullptr;
}
// Given a Loop and the containing SCoP, we compute the partial schedule
// by taking union of individual schedules of each ScopStmt within the loop
// and projecting out the inner dimensions from the range of the schedule.
// for (i = 0; i < n; i++)
//   for (j = 0; j < n; j++)
//     A[j] = 1; //Stmt
//
// The original schedule will be
//   Stmt[i0, i1] -> [i0, i1]
// The schedule for the outer loop will be
//   Stmt[i0, i1] -> [i0]
// The schedule for the inner loop will be
//   Stmt[i0, i1] -> [i0, i1]
//
// The caller owns the returned isl_union_map (__isl_give).
__isl_give isl_union_map *PolyhedralInfo::getScheduleForLoop(const Scop *S,
                                                             Loop *L) const {
  isl_union_map *Schedule = isl_union_map_empty(S->getParamSpace());
  int CurrDim = S->getRelativeLoopDepth(L);
  DEBUG(dbgs() << "Relative loop depth:\t" << CurrDim << "\n");
  assert(CurrDim >= 0 && "Loop in region should have at least depth one");

  for (auto *BB : L->blocks()) {
    auto *SS = S->getStmtFor(BB);
    // Blocks without a statement (e.g. without side effects) are skipped.
    if (!SS)
      continue;

    unsigned int MaxDim = SS->getNumIterators();
    DEBUG(dbgs() << "Maximum depth of Stmt:\t" << MaxDim << "\n");
    // Guard the unsigned subtraction below: a statement inside @p L must have
    // more schedule dimensions than L's relative depth, otherwise
    // "MaxDim - CurrDim - 1" would wrap around.
    assert(MaxDim > static_cast<unsigned>(CurrDim) &&
           "Stmts inside the loop should have at least depth CurrDim + 1");
    auto *ScheduleMap = SS->getSchedule();
    // Drop all schedule dimensions inner to the current loop, keeping
    // dimensions [0, CurrDim].
    ScheduleMap = isl_map_project_out(ScheduleMap, isl_dim_out, CurrDim + 1,
                                      MaxDim - CurrDim - 1);
    ScheduleMap =
        isl_map_set_tuple_id(ScheduleMap, isl_dim_in, SS->getDomainId());
    Schedule =
        isl_union_map_union(Schedule, isl_union_map_from_map(ScheduleMap));
  }
  // Simplify the representation of the accumulated union.
  Schedule = isl_union_map_coalesce(Schedule);
  return Schedule;
}
char PolyhedralInfo::ID = 0;

Pass *polly::createPolyhedralInfoPass() { return new PolyhedralInfo(); }

// LLVM pass registration. Note: the INITIALIZE_PASS_* macros expand to
// complete definitions, so they must not be followed by semicolons (the
// stray ';' triggers extra-semicolon warnings and deviates from LLVM style).
INITIALIZE_PASS_BEGIN(PolyhedralInfo, "polyhedral-info",
                      "Polly - Interface to polyhedral analysis engine", false,
                      false)
INITIALIZE_PASS_DEPENDENCY(DependenceInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(ScopInfoWrapperPass)
INITIALIZE_PASS_END(PolyhedralInfo, "polyhedral-info",
                    "Polly - Interface to polyhedral analysis engine", false,
                    false)

View File

@ -28,6 +28,7 @@ endif ()
add_polly_library(Polly
Analysis/DependenceInfo.cpp
Analysis/PolyhedralInfo.cpp
Analysis/ScopDetection.cpp
Analysis/ScopDetectionDiagnostic.cpp
Analysis/ScopInfo.cpp

View File

@ -26,6 +26,7 @@
#include "polly/DependenceInfo.h"
#include "polly/LinkAllPasses.h"
#include "polly/Options.h"
#include "polly/PolyhedralInfo.h"
#include "polly/ScopDetection.h"
#include "polly/ScopInfo.h"
#include "llvm/Analysis/CFGPrinter.h"
@ -152,6 +153,11 @@ static cl::opt<bool>
cl::desc("Show the Polly CFG right after code generation"),
cl::Hidden, cl::init(false), cl::cat(PollyCategory));
static cl::opt<bool>
EnablePolyhedralInfo("polly-enable-polyhedralinfo",
cl::desc("Enable polyhedral interface of Polly"),
cl::Hidden, cl::init(false), cl::cat(PollyCategory));
namespace polly {
void initializePollyPasses(PassRegistry &Registry) {
initializeCodeGenerationPass(Registry);
@ -168,6 +174,7 @@ void initializePollyPasses(PassRegistry &Registry) {
initializeIslAstInfoPass(Registry);
initializeIslScheduleOptimizerPass(Registry);
initializePollyCanonicalizePass(Registry);
initializePolyhedralInfoPass(Registry);
initializeScopDetectionPass(Registry);
initializeScopInfoRegionPassPass(Registry);
initializeScopInfoWrapperPassPass(Registry);
@ -216,6 +223,8 @@ void registerPollyPasses(llvm::legacy::PassManagerBase &PM) {
PM.add(polly::createDOTOnlyPrinterPass());
PM.add(polly::createScopInfoRegionPassPass());
if (EnablePolyhedralInfo)
PM.add(polly::createPolyhedralInfoPass());
if (ImportJScop)
PM.add(polly::createJSONImporterPass());

View File

@ -1,10 +1,13 @@
; RUN: opt %loadPolly -polly-ast -polly-parallel -polly-parallel-force -analyze < %s | FileCheck %s
; RUN: opt %loadPolly -polyhedral-info -polly-check-parallel -analyze < %s | FileCheck %s -check-prefix=PINFO
;
; void jd(int *A) {
; CHECK: #pragma omp parallel for
; PINFO: for.cond2: Loop is parallel.
; for (int i = 0; i < 1024; i++)
; A[i] = 1;
; CHECK: #pragma omp parallel for
; PINFO: for.cond: Loop is parallel.
; for (int i = 0; i < 1024; i++)
; A[i] = A[i] * 2;
; }

View File

@ -1,4 +1,5 @@
; RUN: opt %loadPolly -polly-ast -polly-parallel -analyze < %s | FileCheck %s
; RUN: opt %loadPolly -polyhedral-info -polly-check-parallel -analyze < %s | FileCheck %s -check-prefix=PINFO
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64"
; for (i = 0; i < 1024; i++)
@ -49,3 +50,6 @@ ret:
; CHECK-NOT: #pragma omp parallel for
; CHECK: for (int c1 = 0; c1 <= 1023; c1 += 1)
; CHECK: Stmt_loop_body(c0, c1);
;
; PINFO: loop.i: Loop is parallel.
; PINFO-NEXT: loop.j: Loop is parallel.

View File

@ -1,4 +1,5 @@
; RUN: opt %loadPolly -polly-ast -polly-parallel -polly-parallel-force -analyze < %s | FileCheck %s
; RUN: opt %loadPolly -polyhedral-info -polly-check-parallel -analyze < %s | FileCheck %s -check-prefix=PINFO
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64"
; int A[1024][1024];
; void bar(int n) {
@ -46,3 +47,6 @@ ret:
; CHECK: #pragma simd
; CHECK: for (int c1 = 0; c1 < n; c1 += 1)
; CHECK: Stmt_loop_body(c0, c1);
; PINFO: loop.i: Loop is parallel.
; PINFO-NEXT: loop.j: Loop is parallel.

View File

@ -1,4 +1,5 @@
; RUN: opt %loadPolly -polly-ast -polly-parallel -polly-parallel-force -analyze < %s | FileCheck %s
; RUN: opt %loadPolly -polyhedral-info -polly-check-parallel -analyze < %s | FileCheck %s -check-prefix=PINFO
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64"
; for (i = 0; i < n; i++)
@ -44,3 +45,6 @@ ret:
; CHECK: #pragma omp parallel for
; CHECK: for (int c1 = 0; c1 < n; c1 += 1)
; CHECK: Stmt_loop_body(c0, c1);
; PINFO: loop.i: Loop is not parallel.
; PINFO-NEXT: loop.j: Loop is parallel.

View File

@ -1,4 +1,5 @@
; RUN: opt %loadPolly -polly-ast -polly-parallel -analyze < %s | FileCheck %s
; RUN: opt %loadPolly -polyhedral-info -polly-check-parallel -analyze < %s | FileCheck %s -check-prefix=PINFO
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64"
; for (i = 0; i < n; i++)
@ -43,3 +44,6 @@ ret:
; CHECK: for (int c0 = 0; c0 < n; c0 += 1)
; CHECK: for (int c1 = 0; c1 < n; c1 += 1)
; CHECK: Stmt_loop_body(c0, c1);
; PINFO: loop.i: Loop is parallel.
; PINFO-NEXT: loop.j: Loop is not parallel.

View File

@ -1,4 +1,5 @@
; RUN: opt %loadPolly -polly-ast -polly-parallel -analyze < %s | FileCheck %s
; RUN: opt %loadPolly -polyhedral-info -polly-check-parallel -analyze < %s | FileCheck %s -check-prefix=PINFO
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64"
; for (i = 0; i < n; i++)
@ -31,3 +32,4 @@ ret:
; CHECK: for (int c0 = 0; c0 < n; c0 += 1)
; CHECK: Stmt_loop_body(c0)
; PINFO: loop.header: Loop is not parallel.

View File

@ -1,4 +1,5 @@
; RUN: opt %loadPolly -polly-ast -polly-parallel -polly-parallel-force -analyze < %s | FileCheck %s
; RUN: opt %loadPolly -polyhedral-info -polly-check-parallel -analyze < %s | FileCheck %s -check-prefix=PINFO
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64"
; for (i = 0; i < n; i++)
@ -33,3 +34,4 @@ ret:
; CHECK: #pragma omp parallel for
; CHECK: for (int c0 = 0; c0 < n; c0 += 1)
; CHECK: Stmt_loop_body(c0)
; PINFO: loop.header: Loop is parallel.

View File

@ -1,4 +1,5 @@
; RUN: opt %loadPolly -polly-ast -polly-parallel -polly-dependences-computeout=1 -analyze < %s | FileCheck %s
; RUN: opt %loadPolly -polyhedral-info -polly-check-parallel -analyze < %s | FileCheck %s -check-prefix=PINFO
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64"
; for (i = 0; i < n; i++)
@ -33,3 +34,4 @@ ret:
; CHECK-NOT: #pragma omp parallel for
; CHECK: for (int c0 = 0; c0 < n; c0 += 1)
; CHECK: Stmt_loop_body(c0)
; PINFO: loop.header: Loop is parallel.

View File

@ -1,11 +1,14 @@
; RUN: opt %loadPolly -polly-ast -polly-ast-detect-parallel -analyze < %s | FileCheck %s
; RUN: opt %loadPolly -polyhedral-info -polly-check-parallel -analyze < %s | FileCheck %s -check-prefix=PINFO
;
; void f(int *A, int N) {
; CHECK: #pragma minimal dependence distance: 1
; PINFO: for.cond: Loop is not parallel.
; for (int j = 0; j < N; j++)
; CHECK: #pragma minimal dependence distance: 8
; for (int i = 0; i < N; i++)
; A[i + 8] = A[i] + 1;
; CHECK: #pragma minimal dependence distance: 8
; PINFO-NEXT: for.cond1: Loop is not parallel.
; for (int i = 0; i < N; i++)
; A[i + 8] = A[i] + 1;
; }
;
target datalayout = "e-m:e-p:32:32-i64:64-v128:64:128-n32-S64"

View File

@ -1,7 +1,9 @@
; RUN: opt %loadPolly -basicaa -polly-ast -polly-ast-detect-parallel -analyze < %s | FileCheck %s
; RUN: opt %loadPolly -polyhedral-info -polly-check-parallel -analyze < %s | FileCheck %s -check-prefix=PINFO
;
; void f(int *restrict A, int *restrict B, int N) {
; CHECK: #pragma minimal dependence distance: 5
; PINFO: for.cond: Loop is not parallel.
; for (int i = 0; i < N; i++) {
; A[i + 7] = A[i] + 1;
; B[i + 5] = B[i] + 1;

View File

@ -1,11 +1,14 @@
; RUN: opt %loadPolly -polly-ast -polly-ast-detect-parallel -analyze < %s | FileCheck %s
; RUN: opt %loadPolly -polyhedral-info -polly-check-parallel -analyze < %s | FileCheck %s -check-prefix=PINFO
;
; void f(int *A, int N, int c) {
; CHECK: #pragma minimal dependence distance: 1
; PINFO: for.cond: Loop is not parallel.
; for (int j = 0; j < N; j++)
; CHECK: #pragma minimal dependence distance: max(-c, c)
; for (int i = 0; i < N; i++)
; A[i + c] = A[i] + 1;
; CHECK: #pragma minimal dependence distance: max(-c, c)
; PINFO-NEXT: for.cond1: Loop is not parallel.
; for (int i = 0; i < N; i++)
; A[i + c] = A[i] + 1;
; }
;
target datalayout = "e-m:e-p:32:32-i64:64-v128:64:128-n32-S64"

View File

@ -1,11 +1,14 @@
; RUN: opt %loadPolly -polly-ast -polly-ast-detect-parallel -analyze < %s | FileCheck %s
; RUN: opt %loadPolly -polyhedral-info -polly-check-parallel -analyze < %s | FileCheck %s -check-prefix=PINFO
;
; void f(int *A, int N, int c, int v) {
; CHECK: #pragma minimal dependence distance: 1
; PINFO: for.cond: Loop is not parallel.
; for (int j = 0; j < N; j++)
; CHECK: #pragma minimal dependence distance: max(-c - v, c + v)
; for (int i = 0; i < N; i++)
; A[i + c + v] = A[i] + 1;
; CHECK: #pragma minimal dependence distance: max(-c - v, c + v)
; PINFO-NEXT: for.cond1: Loop is not parallel.
; for (int i = 0; i < N; i++)
; A[i + c + v] = A[i] + 1;
; }
;
target datalayout = "e-m:e-p:32:32-i64:64-v128:64:128-n32-S64"

View File

@ -1,7 +1,9 @@
; RUN: opt %loadPolly -polly-ast -polly-ast-detect-parallel -analyze < %s | FileCheck %s
; RUN: opt %loadPolly -polyhedral-info -polly-check-parallel -analyze < %s | FileCheck %s -check-prefix=PINFO
;
; void f(int *A, int N) {
; CHECK: #pragma minimal dependence distance: ((N - 1) % 2) + 1
; PINFO: for.cond: Loop is not parallel.
; for (int i = 0; i < N; i++)
; A[i] = A[N - i] + 1;
; }

View File

@ -1,11 +1,14 @@
; RUN: opt %loadPolly -polly-canonicalize -polly-ast -polly-ast-detect-parallel -analyze < %s | FileCheck %s
; RUN: opt %loadPolly -polyhedral-info -polly-check-parallel -analyze < %s | FileCheck %s -check-prefix=PINFO
;
; void f(int *restrict A, int *restrict sum) {
; CHECK: #pragma minimal dependence distance: 1
; PINFO: for.cond: Loop is not parallel.
; for (int j = 0; j < 1024; j++)
; CHECK: #pragma minimal dependence distance: 1
; for (int i = j; i < 1024; i++)
; A[i - 3] = A[j] * 2 + A[j] + 2;
; CHECK: #pragma minimal dependence distance: 1
; PINFO-NEXT: for.cond1: Loop is not parallel.
; for (int i = j; i < 1024; i++)
; A[i - 3] = A[j] * 2 + A[j] + 2;
; }
;
target datalayout = "e-m:e-p:32:32-i64:64-v128:64:128-n32-S64"

View File

@ -1,8 +1,10 @@
; RUN: opt %loadPolly -basicaa -polly-ast -polly-ast-detect-parallel -analyze < %s | FileCheck %s
; RUN: opt %loadPolly -polyhedral-info -polly-check-parallel -analyze < %s | FileCheck %s -check-prefix=PINFO
;
; void f(int *restrict A, int *restrict B, int *restrict C, int *restrict D,
; int *restrict E, int N) {
; CHECK: #pragma minimal dependence distance: N >= 35 ? 1 : N >= 17 && N <= 34 ? 2 : 5
; PINFO: for.cond: Loop is not parallel.
; for (int i = 0; i < N; i++) {
; A[i] = A[100 - 2 * i] + 1;
; B[i] = B[100 - 3 * i] + 1;

View File

@ -1,12 +1,16 @@
; RUN: opt %loadPolly -polly-ast -polly-ast-detect-parallel -analyze < %s | FileCheck %s
; RUN: opt %loadPolly -polyhedral-info -polly-check-parallel -analyze < %s | FileCheck %s -check-prefix=PINFO
;
; CHECK: #pragma known-parallel reduction (^ : sum)
; void f(int N, int M, int P, int sum[P][M]) {
; PINFO: for.cond: Loop is not parallel.
; for (int i = 0; i < N; i++)
; for (int j = 0; j < P; j++)
; CHECK: #pragma simd
; for (int k = 0; k < M; k++)
; sum[j][k] ^= j;
; PINFO-NEXT: for.cond1: Loop is parallel.
; for (int j = 0; j < P; j++)
; CHECK: #pragma simd
; PINFO-NEXT: for.cond4: Loop is parallel.
; for (int k = 0; k < M; k++)
; sum[j][k] ^= j;
; }
;
target datalayout = "e-m:e-p:32:32-i64:64-v128:64:128-n32-S64"

View File

@ -1,10 +1,13 @@
; RUN: opt %loadPolly -polly-ast -polly-ast-detect-parallel -analyze < %s | FileCheck %s
; RUN: opt %loadPolly -polyhedral-info -polly-check-parallel -analyze < %s | FileCheck %s -check-prefix=PINFO
;
; Verify that we won't privatize anything in the outer dimension
;
; CHECK: #pragma known-parallel
; PINFO: for.cond: Loop is parallel.
; CHECK: for (int c0 = 0; c0 < 2 * n; c0 += 1)
; CHECK: #pragma simd reduction
; PINFO-NEXT: for.cond1: Loop is not parallel.
; CHECK: for (int c1 = 0; c1 <= 1023; c1 += 1)
; CHECK: Stmt_for_body3(c0, c1);
;

View File

@ -1,4 +1,5 @@
; RUN: opt %loadPolly -polly-import-jscop-dir=%S -polly-import-jscop -polly-ast -polly-ast-detect-parallel -analyze < %s | FileCheck %s
; RUN: opt %loadPolly -polyhedral-info -polly-check-parallel -analyze < %s | FileCheck %s -check-prefix=PINFO
;
; CHECK-NOT: #pragma simd{{\s*$}}
; CHECK: #pragma simd reduction
@ -6,6 +7,9 @@
; CHECK: #pragma simd{{\s*$}}
; CHECK: Stmt_S1(n - c1)
;
; PINFO: for.cond2: Loop is parallel.
; PINFO-NEXT: for.cond: Loop is not parallel.
;
; void rlr(int *A, long n) {
; for (long i = 0; i < 2 * n; i++)
; S0: A[0] += i;

View File

@ -1,4 +1,5 @@
; RUN: opt %loadPolly -polly-import-jscop-dir=%S -polly-import-jscop -polly-ast -polly-ast-detect-parallel -analyze < %s | FileCheck %s
; RUN: opt %loadPolly -polyhedral-info -polly-check-parallel -analyze < %s | FileCheck %s -check-prefix=PINFO
;
; CHECK: #pragma known-parallel reduction (+ : A)
; CHECK-NEXT: for (int c0 = 0; c0 <= 2; c0 += 1) {
@ -12,6 +13,9 @@
; CHECK-NEXT: Stmt_S0(c1);
; CHECK-NEXT: }
;
; PINFO: for.cond2: Loop is parallel.
; PINFO-NEXT: for.cond: Loop is not parallel.
;
; void rms(int *A, long n) {
; for (long i = 0; i < 2 * n; i++)
; S0: A[0] += i;