@@ -314,9 +314,12 @@ const detail::ScheduleTree* findThreadMappingAncestor(
 }
 
 /*
- * Check if a reference group is accessed in a coalesced way.
+ * Should this reference group be promoted for the purpose of coalescing?
  *
- * In particular, check if incrementing the schedule dimension mapped to
+ * If the reference group is not already accessed in a coalesced way,
+ * then the group should be promoted.
+ * The check for coalesced accesses is performed as follows.
+ * Check if incrementing the schedule dimension mapped to
  * Thread::x results in the last tensor index being incremented as well.
  * Since accesses in the group may belong to different statements, which may
  * have different loops mapped to Thread::x, perform the check for each basic
@@ -325,7 +328,7 @@ const detail::ScheduleTree* findThreadMappingAncestor(
  * accessed in a coalesced way if all references in this group are accessed in
  * a coalesced way.
  */
-bool isCoalesced(
+bool promotionImprovesCoalescing(
     const ThreadIdxXScheduleDepthState& threadIdxXScheduleDepthState,
     const TensorReferenceGroup& group,
     isl::union_map schedule,
@@ -353,11 +356,11 @@ bool isCoalesced(
            .apply_range(scheduledAccess);
 
       if (not accessedByAdjacentX.is_subset(elementToNext)) {
-        return false;
+        return true;
       }
     }
   }
-  return true;
+  return false;
 }
 
 /*
@@ -558,7 +561,7 @@ void promoteToSharedGreedy(
     // Do not promote if the group features no reuse and is accessed in a
     // coalesced way.
     if (!hasReuseWithin(*group, partialSchedMupa) &&
-        isCoalesced(
+        !promotionImprovesCoalescing(
             threadIdxXScheduleDepthState,
             *group,
             fullSched,
0 commit comments