@@ -654,16 +654,15 @@ void promoteToRegistersBelow(MappedScop& mscop, detail::ScheduleTree* scope) {
   auto blockSchedule = mscop.blockMappingSchedule(mscop.schedule());
 
   // Pure affine schedule without (mapping) filters.
-  isl::multi_union_pw_aff partialSchedMupa =
-      partialScheduleMupa<Prefix>(root, scope);
+  auto partialSchedMupa = partialScheduleMupa<Prefix>(root, scope);
   // Schedule with block mapping filter.
   auto partialSched =
       isl::union_map::from(partialSchedMupa).intersect_domain(blockMapping);
   // The following promotion validity and profitability checks need to be
   // performed with respect to the block mapping, so append the block schedule.
   // If the partial schedule contains it already, it will just end up with
   // identical dimensions without affecting the result of the checks.
-  partialSchedMupa = partialSchedMupa.flat_range_product(blockSchedule);
+  auto partialSchedBlockMupa = partialSchedMupa.range_product(blockSchedule);
 
   for (auto& tensorGroups : groupMap) {
     auto tensorId = tensorGroups.first;
@@ -677,11 +676,11 @@ void promoteToRegistersBelow(MappedScop& mscop, detail::ScheduleTree* scope) {
         continue;
       }
       if (!isPromotableToRegistersBelow(
-              *group, root, scope, partialSchedMupa, threadSchedule)) {
+              *group, root, scope, partialSchedBlockMupa, threadSchedule)) {
        continue;
       }
       // Check reuse within threads.
-      auto schedule = partialSchedMupa.flat_range_product(threadSchedule);
+      auto schedule = partialSchedBlockMupa.range_product(threadSchedule);
       if (!hasReuseWithin(*group, schedule)) {
         continue;
       }
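
Note on the change: flat_range_product concatenates the ranges of its two operands into a single flat space, while range_product keeps them as a nested pair, so the block-schedule dimensions stay structurally separate from the prefix schedule. The rewrite also stops overwriting partialSchedMupa, keeping the pure prefix schedule available under its own name alongside the combined partialSchedBlockMupa.

A minimal sketch of the difference between the two products, assuming the upstream isl C++ bindings (<isl/cpp.h>); the wrapper used in this file exposes the same method names:

#include <iostream>
#include <isl/cpp.h>

int main() {
  isl::ctx ctx(isl_ctx_alloc());
  // Two one-dimensional schedules over the same domain S[i, j].
  isl::multi_union_pw_aff prefix(ctx, "[{ S[i, j] -> [(i)] }]");
  isl::multi_union_pw_aff block(ctx, "[{ S[i, j] -> [(j)] }]");

  // range_product keeps the two ranges as a nested pair:
  // schematically, S[i, j] -> [[i] -> [j]].
  auto nested = prefix.range_product(block);
  // flat_range_product concatenates them into one flat range:
  // schematically, S[i, j] -> [i, j].
  auto flat = prefix.flat_range_product(block);

  std::cout << nested.to_str() << "\n";
  std::cout << flat.to_str() << "\n";
  isl_ctx_free(ctx.release());
  return 0;
}

Both products carry the same affine information; they differ only in whether the resulting product space is flattened or kept nested.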