
Commit ca7f5bb

committed on Oct 20, 2015
Full/partial tile separation for vectorization
We isolate full tiles from partial tiles to be able to, for example, vectorize loops with parametric lower and/or upper bounds.

If we use -polly-vectorizer=stripmine, we can see execution-time improvements: correlation from 1m7361s to 0m5720s (-67.05 %), covariance from 1m5561s to 0m5680s (-63.50 %), ary3 from 2m3201s to 1m2361s (-46.72 %), CrystalMk from 8m5565s to 7m4285s (-13.18 %).

The current full/partial tile separation increases compile time more than necessary. As a result, we see compile-time regressions, for example, for 3mm from 0m6320s to 0m9881s (+56.34 %). Some of this increase is expected, as we generate more IR and consequently more time is spent in the LLVM backends. However, a first investigation has shown that a larger portion of the compile time is unnecessarily spent inside Polly's parallelism detection and could be eliminated by propagating existing knowledge about vector-loop parallelism. Before enabling -polly-vectorizer=stripmine by default, this compile-time issue needs to be addressed.

Contributed-by: Roman Gareev <gareevroman@gmail.com>

Reviewers: jdoerfert, grosser

Subscribers: grosser, #polly

Differential Revision: http://reviews.llvm.org/D13779

llvm-svn: 250809
1 parent 648a2c3 commit ca7f5bb
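
For illustration only (this sketch is not part of the commit): strip-mining the innermost loop by the vector width and then isolating the full tiles conceptually yields the structure below, where every full tile has a fixed trip count and only the partial tile depends on the parametric bound. The function name and the vector width of 4 are made up for this example.

/* Illustrative sketch only: full/partial tile separation of a loop with a
 * parametric upper bound n, assuming a vector width of 4. */
void add_one(int n, double *A) {
  /* Original loop:
   *   for (int i = 0; i < n; i += 1)
   *     A[i] += 1.0;
   * The trip count of the innermost loop depends on n, so it cannot be
   * turned into straight-line vector code as is. */

  /* Full tiles: every instance of the point loop runs exactly 4 iterations
   * and is therefore trivially vectorizable. */
  for (int it = 0; it <= n / 4 - 1; it += 1)
    for (int ip = 0; ip <= 3; ip += 1)
      A[4 * it + ip] += 1.0;

  /* Partial tile: the at most 3 remaining iterations are kept separate, so
   * they no longer prevent vectorization of the full tiles above. */
  for (int i = n - n % 4; i < n; i += 1)
    A[i] += 1.0;
}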

File tree

3 files changed, +202 -0 lines changed


‎polly/include/polly/ScheduleOptimizer.h

+11
@@ -64,6 +64,17 @@ class ScheduleTreeOptimizer {
   static bool isProfitableSchedule(polly::Scop &S,
                                    __isl_keep isl_union_map *NewSchedule);

+  /// @brief Isolate a set of partial tile prefixes.
+  ///
+  /// This set should ensure that it contains only partial tile prefixes that
+  /// have exactly VectorWidth iterations.
+  ///
+  /// @param Node A schedule node band, which is a parent of a band node,
+  ///             that contains a vector loop.
+  /// @return Modified isl_schedule_node.
+  static __isl_give isl_schedule_node *
+  isolateFullPartialTiles(__isl_take isl_schedule_node *Node, int VectorWidth);
+
 private:
   /// @brief Tile a schedule node.
   ///
‎polly/lib/Transform/ScheduleOptimizer.cpp

+99
Original file line numberDiff line numberDiff line change
@@ -160,6 +160,104 @@ static cl::list<int>
               cl::Hidden, cl::ZeroOrMore, cl::CommaSeparated,
               cl::cat(PollyCategory));

+/// @brief Create an isl_union_set, which describes the isolate option based
+///        on IsolateDomain.
+///
+/// @param IsolateDomain An isl_set whose last dimension is the only one that
+///                      should belong to the current band node.
+static __isl_give isl_union_set *
+getIsolateOptions(__isl_take isl_set *IsolateDomain) {
+  auto Dims = isl_set_dim(IsolateDomain, isl_dim_set);
+  auto *IsolateRelation = isl_map_from_domain(IsolateDomain);
+  IsolateRelation = isl_map_move_dims(IsolateRelation, isl_dim_out, 0,
+                                      isl_dim_in, Dims - 1, 1);
+  auto *IsolateOption = isl_map_wrap(IsolateRelation);
+  auto *Id = isl_id_alloc(isl_set_get_ctx(IsolateOption), "isolate", NULL);
+  return isl_union_set_from_set(isl_set_set_tuple_id(IsolateOption, Id));
+}
+
+/// @brief Create an isl_union_set, which describes the atomic option for the
+///        dimension of the current node.
+///
+/// It may help to reduce the size of generated code.
+///
+/// @param Ctx An isl_ctx, which is used to create the isl_union_set.
+static __isl_give isl_union_set *getAtomicOptions(__isl_take isl_ctx *Ctx) {
+  auto *Space = isl_space_set_alloc(Ctx, 0, 1);
+  auto *AtomicOption = isl_set_universe(Space);
+  auto *Id = isl_id_alloc(Ctx, "atomic", NULL);
+  return isl_union_set_from_set(isl_set_set_tuple_id(AtomicOption, Id));
+}
+
+/// @brief Make the last dimension of Set take values
+///        from 0 to VectorWidth - 1.
+///
+/// @param Set         A set, which should be modified.
+/// @param VectorWidth A parameter, which determines the constraint.
+static __isl_give isl_set *addExtentConstraints(__isl_take isl_set *Set,
+                                                int VectorWidth) {
+  auto Dims = isl_set_dim(Set, isl_dim_set);
+  auto Space = isl_set_get_space(Set);
+  auto *LocalSpace = isl_local_space_from_space(Space);
+  auto *ExtConstr =
+      isl_constraint_alloc_inequality(isl_local_space_copy(LocalSpace));
+  ExtConstr = isl_constraint_set_constant_si(ExtConstr, 0);
+  ExtConstr =
+      isl_constraint_set_coefficient_si(ExtConstr, isl_dim_set, Dims - 1, 1);
+  Set = isl_set_add_constraint(Set, ExtConstr);
+  ExtConstr = isl_constraint_alloc_inequality(LocalSpace);
+  ExtConstr = isl_constraint_set_constant_si(ExtConstr, VectorWidth - 1);
+  ExtConstr =
+      isl_constraint_set_coefficient_si(ExtConstr, isl_dim_set, Dims - 1, -1);
+  return isl_set_add_constraint(Set, ExtConstr);
+}
+
+/// @brief Build the desired set of partial tile prefixes.
+///
+/// We build a set of partial tile prefixes, which are prefixes of the vector
+/// loop that have exactly VectorWidth iterations.
+///
+/// 1. Get all prefixes of the vector loop.
+/// 2. Extend it to a set, which has exactly VectorWidth iterations for
+///    any prefix from the set that was built on the previous step.
+/// 3. Subtract loop domain from it, project out the vector loop dimension and
+///    get a set of prefixes, which don't have exactly VectorWidth iterations.
+/// 4. Subtract it from all prefixes of the vector loop and get the desired
+///    set.
+///
+/// @param ScheduleRange A range of a map, which describes a prefix schedule
+///                      relation.
+static __isl_give isl_set *
+getPartialTilePrefixes(__isl_take isl_set *ScheduleRange, int VectorWidth) {
+  auto Dims = isl_set_dim(ScheduleRange, isl_dim_set);
+  auto *LoopPrefixes = isl_set_project_out(isl_set_copy(ScheduleRange),
+                                           isl_dim_set, Dims - 1, 1);
+  auto *ExtentPrefixes =
+      isl_set_add_dims(isl_set_copy(LoopPrefixes), isl_dim_set, 1);
+  ExtentPrefixes = addExtentConstraints(ExtentPrefixes, VectorWidth);
+  auto *BadPrefixes = isl_set_subtract(ExtentPrefixes, ScheduleRange);
+  BadPrefixes = isl_set_project_out(BadPrefixes, isl_dim_set, Dims - 1, 1);
+  return isl_set_subtract(LoopPrefixes, BadPrefixes);
+}
+
+__isl_give isl_schedule_node *ScheduleTreeOptimizer::isolateFullPartialTiles(
+    __isl_take isl_schedule_node *Node, int VectorWidth) {
+  assert(isl_schedule_node_get_type(Node) == isl_schedule_node_band);
+  Node = isl_schedule_node_child(Node, 0);
+  Node = isl_schedule_node_child(Node, 0);
+  auto *SchedRelUMap = isl_schedule_node_get_prefix_schedule_relation(Node);
+  auto *ScheduleRelation = isl_map_from_union_map(SchedRelUMap);
+  auto *ScheduleRange = isl_map_range(ScheduleRelation);
+  auto *IsolateDomain = getPartialTilePrefixes(ScheduleRange, VectorWidth);
+  auto *AtomicOption = getAtomicOptions(isl_set_get_ctx(IsolateDomain));
+  auto *IsolateOption = getIsolateOptions(IsolateDomain);
+  Node = isl_schedule_node_parent(Node);
+  Node = isl_schedule_node_parent(Node);
+  auto *Options = isl_union_set_union(IsolateOption, AtomicOption);
+  Node = isl_schedule_node_band_set_ast_build_options(Node, Options);
+  return Node;
+}
+
 __isl_give isl_schedule_node *
 ScheduleTreeOptimizer::prevectSchedBand(__isl_take isl_schedule_node *Node,
                                         unsigned DimToVectorize,
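
For illustration only (not part of the patch): the standalone sketch below reproduces the set computation of getPartialTilePrefixes on a hard-coded example, a point loop obtained by strip-mining a loop with parametric bound n by a vector width of 4 and no further outer loops. It uses isl's bound helpers instead of addExtentConstraints to keep the example short; the input set, the vector width, and the program itself are assumptions made for the example.

/* Hypothetical standalone example; compile and link against libisl. */
#include <isl/ctx.h>
#include <isl/set.h>
#include <stdio.h>
#include <stdlib.h>

int main(void) {
  isl_ctx *Ctx = isl_ctx_alloc();

  /* Assumed range of the prefix schedule of the point (vector) loop:
   * dimension 0 is the tile loop, dimension 1 the point loop, width 4. */
  isl_set *ScheduleRange = isl_set_read_from_str(
      Ctx, "[n] -> { [it, ip] : 0 <= ip <= 3 and 0 <= 4it + ip < n }");

  /* 1. All prefixes of the vector loop. */
  isl_set *LoopPrefixes =
      isl_set_project_out(isl_set_copy(ScheduleRange), isl_dim_set, 1, 1);

  /* 2. Extend each prefix by a dimension with exactly 4 iterations. */
  isl_set *ExtentPrefixes =
      isl_set_add_dims(isl_set_copy(LoopPrefixes), isl_dim_set, 1);
  ExtentPrefixes = isl_set_lower_bound_si(ExtentPrefixes, isl_dim_set, 1, 0);
  ExtentPrefixes = isl_set_upper_bound_si(ExtentPrefixes, isl_dim_set, 1, 3);

  /* 3. Prefixes that do not have all 4 iterations in the schedule range. */
  isl_set *BadPrefixes =
      isl_set_subtract(ExtentPrefixes, isl_set_copy(ScheduleRange));
  BadPrefixes = isl_set_project_out(BadPrefixes, isl_dim_set, 1, 1);

  /* 4. The prefixes with exactly 4 iterations (the full tiles); prints a set
   *    equivalent to [n] -> { [it] : 0 <= 4it <= n - 4 }, up to isl's exact
   *    formatting. */
  isl_set *FullPrefixes =
      isl_set_subtract(isl_set_copy(LoopPrefixes), BadPrefixes);
  char *Str = isl_set_to_str(FullPrefixes);
  printf("%s\n", Str);
  free(Str);

  isl_set_free(ScheduleRange);
  isl_set_free(LoopPrefixes);
  isl_set_free(FullPrefixes);
  isl_ctx_free(Ctx);
  return 0;
}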
@@ -183,6 +281,7 @@ ScheduleTreeOptimizer::prevectSchedBand(__isl_take isl_schedule_node *Node,
   Sizes =
       isl_multi_val_set_val(Sizes, 0, isl_val_int_from_si(Ctx, VectorWidth));
   Node = isl_schedule_node_band_tile(Node, Sizes);
+  Node = isolateFullPartialTiles(Node, VectorWidth);
   Node = isl_schedule_node_child(Node, 0);
   // Make sure the "trivially vectorizable loop" is not unrolled. Otherwise,
   // we will have troubles to match it in the backend.
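
Also for illustration only: the sketch below mirrors getIsolateOptions and getAtomicOptions on a hard-coded input, the full-tile prefixes computed in the previous example, to show how the two AST build options are combined before being attached to the tile band. The input set and the program itself are assumptions made for the example, not code from the patch.

/* Hypothetical standalone example; compile and link against libisl. */
#include <isl/ctx.h>
#include <isl/id.h>
#include <isl/map.h>
#include <isl/set.h>
#include <isl/space.h>
#include <isl/union_set.h>
#include <stdio.h>
#include <stdlib.h>

int main(void) {
  isl_ctx *Ctx = isl_ctx_alloc();

  /* Full-tile prefixes from the previous sketch (one tile dimension). */
  isl_set *IsolateDomain =
      isl_set_read_from_str(Ctx, "[n] -> { [it] : 0 <= 4it <= n - 4 }");

  /* "isolate" option: wrap a map whose only output dimension is the last
   * dimension of IsolateDomain and name the wrapped set "isolate". */
  isl_map *IsolateRelation = isl_map_from_domain(IsolateDomain);
  IsolateRelation =
      isl_map_move_dims(IsolateRelation, isl_dim_out, 0, isl_dim_in, 0, 1);
  isl_set *IsolateOption = isl_map_wrap(IsolateRelation);
  IsolateOption = isl_set_set_tuple_id(IsolateOption,
                                       isl_id_alloc(Ctx, "isolate", NULL));

  /* "atomic" option: a one-dimensional universe set named "atomic". */
  isl_set *AtomicOption = isl_set_universe(isl_space_set_alloc(Ctx, 0, 1));
  AtomicOption = isl_set_set_tuple_id(AtomicOption,
                                      isl_id_alloc(Ctx, "atomic", NULL));

  /* Union of both options, analogous to what the patch passes to
   * isl_schedule_node_band_set_ast_build_options on the tile band. */
  isl_union_set *Options =
      isl_union_set_union(isl_union_set_from_set(IsolateOption),
                          isl_union_set_from_set(AtomicOption));
  char *Str = isl_union_set_to_str(Options);
  printf("%s\n", Str);
  free(Str);

  isl_union_set_free(Options);
  isl_ctx_free(Ctx);
  return 0;
}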
New test file (+92)
@@ -0,0 +1,92 @@
+; RUN: opt -S %loadPolly -polly-vectorizer=stripmine -polly-opt-isl -polly-ast -analyze < %s | FileCheck %s
+
+; CHECK: // 1st level tiling - Tiles
+; CHECK: #pragma known-parallel
+; CHECK: for (int c0 = 0; c0 <= floord(ni - 1, 32); c0 += 1)
+; CHECK: for (int c1 = 0; c1 <= floord(nj - 1, 32); c1 += 1)
+; CHECK: for (int c2 = 0; c2 <= floord(nk - 1, 32); c2 += 1) {
+; CHECK: // 1st level tiling - Points
+; CHECK: for (int c3 = 0; c3 <= min(31, ni - 32 * c0 - 1); c3 += 1) {
+; CHECK: for (int c4 = 0; c4 <= min(7, -8 * c1 + nj / 4 - 1); c4 += 1)
+; CHECK: for (int c5 = 0; c5 <= min(31, nk - 32 * c2 - 1); c5 += 1)
+; CHECK: #pragma simd
+; CHECK: for (int c6 = 0; c6 <= 3; c6 += 1)
+; CHECK: Stmt_for_body_6(32 * c0 + c3, 32 * c1 + 4 * c4 + c6, 32 * c2 + c5);
+; CHECK: if (nj >= 32 * c1 + 4 && 32 * c1 + 31 >= nj) {
+; CHECK: for (int c5 = 0; c5 <= min(31, nk - 32 * c2 - 1); c5 += 1)
+; CHECK: #pragma simd
+; CHECK: for (int c6 = 0; c6 < nj % 4; c6 += 1)
+; CHECK: Stmt_for_body_6(32 * c0 + c3, -((nj - 1) % 4) + nj + c6 - 1, 32 * c2 + c5);
+; CHECK: } else if (32 * c1 + 3 >= nj)
+; CHECK: for (int c5 = 0; c5 <= min(31, nk - 32 * c2 - 1); c5 += 1)
+; CHECK: #pragma simd
+; CHECK: for (int c6 = 0; c6 < nj - 32 * c1; c6 += 1)
+; CHECK: Stmt_for_body_6(32 * c0 + c3, 32 * c1 + c6, 32 * c2 + c5);
+; CHECK: }
+; CHECK: }
+
+; Function Attrs: nounwind uwtable
+define void @kernel_gemm(i32 %ni, i32 %nj, i32 %nk, double %alpha, double %beta, [1024 x double]* %C, [1024 x double]* %A, [1024 x double]* %B) #0 {
+entry:
+  %cmp.27 = icmp sgt i32 %ni, 0
+  br i1 %cmp.27, label %for.cond.1.preheader.lr.ph, label %for.end.22
+
+for.cond.1.preheader.lr.ph:                       ; preds = %entry
+  br label %for.cond.1.preheader
+
+for.cond.1.preheader:                             ; preds = %for.cond.1.preheader.lr.ph, %for.inc.20
+  %indvars.iv33 = phi i64 [ 0, %for.cond.1.preheader.lr.ph ], [ %indvars.iv.next34, %for.inc.20 ]
+  %cmp2.25 = icmp sgt i32 %nj, 0
+  br i1 %cmp2.25, label %for.cond.4.preheader.lr.ph, label %for.inc.20
+
+for.cond.4.preheader.lr.ph:                       ; preds = %for.cond.1.preheader
+  br label %for.cond.4.preheader
+
+for.cond.4.preheader:                             ; preds = %for.cond.4.preheader.lr.ph, %for.inc.17
+  %indvars.iv29 = phi i64 [ 0, %for.cond.4.preheader.lr.ph ], [ %indvars.iv.next30, %for.inc.17 ]
+  %cmp5.23 = icmp sgt i32 %nk, 0
+  br i1 %cmp5.23, label %for.body.6.lr.ph, label %for.inc.17
+
+for.body.6.lr.ph:                                 ; preds = %for.cond.4.preheader
+  br label %for.body.6
+
+for.body.6:                                       ; preds = %for.body.6.lr.ph, %for.body.6
+  %indvars.iv = phi i64 [ 0, %for.body.6.lr.ph ], [ %indvars.iv.next, %for.body.6 ]
+  %arrayidx8 = getelementptr inbounds [1024 x double], [1024 x double]* %A, i64 %indvars.iv33, i64 %indvars.iv
+  %0 = load double, double* %arrayidx8, align 8
+  %arrayidx12 = getelementptr inbounds [1024 x double], [1024 x double]* %B, i64 %indvars.iv, i64 %indvars.iv29
+  %1 = load double, double* %arrayidx12, align 8
+  %mul = fmul double %0, %1
+  %arrayidx16 = getelementptr inbounds [1024 x double], [1024 x double]* %C, i64 %indvars.iv33, i64 %indvars.iv29
+  %2 = load double, double* %arrayidx16, align 8
+  %add = fadd double %2, %mul
+  store double %add, double* %arrayidx16, align 8
+  %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
+  %lftr.wideiv = trunc i64 %indvars.iv.next to i32
+  %exitcond = icmp ne i32 %lftr.wideiv, %nk
+  br i1 %exitcond, label %for.body.6, label %for.cond.4.for.inc.17_crit_edge
+
+for.cond.4.for.inc.17_crit_edge:                  ; preds = %for.body.6
+  br label %for.inc.17
+
+for.inc.17:                                       ; preds = %for.cond.4.for.inc.17_crit_edge, %for.cond.4.preheader
+  %indvars.iv.next30 = add nuw nsw i64 %indvars.iv29, 1
+  %lftr.wideiv31 = trunc i64 %indvars.iv.next30 to i32
+  %exitcond32 = icmp ne i32 %lftr.wideiv31, %nj
+  br i1 %exitcond32, label %for.cond.4.preheader, label %for.cond.1.for.inc.20_crit_edge
+
+for.cond.1.for.inc.20_crit_edge:                  ; preds = %for.inc.17
+  br label %for.inc.20
+
+for.inc.20:                                       ; preds = %for.cond.1.for.inc.20_crit_edge, %for.cond.1.preheader
+  %indvars.iv.next34 = add nuw nsw i64 %indvars.iv33, 1
+  %lftr.wideiv35 = trunc i64 %indvars.iv.next34 to i32
+  %exitcond36 = icmp ne i32 %lftr.wideiv35, %ni
+  br i1 %exitcond36, label %for.cond.1.preheader, label %for.cond.for.end.22_crit_edge
+
+for.cond.for.end.22_crit_edge:                    ; preds = %for.inc.20
+  br label %for.end.22
+
+for.end.22:                                       ; preds = %for.cond.for.end.22_crit_edge, %entry
+  ret void
+}
