@@ -396,33 +396,35 @@ static inline void arch_tlbbatch_flush(struct arch_tlbflush_unmap_batch *batch)
 #define __flush_tlb_range_op(op, start, pages, stride,			\
 				asid, tlb_level, tlbi_user, lpa2)	\
 do {									\
+	typeof(start) __flush_start = start;				\
+	typeof(pages) __flush_pages = pages;				\
 	int num = 0;							\
 	int scale = 3;							\
 	int shift = lpa2 ? 16 : PAGE_SHIFT;				\
 	unsigned long addr;						\
 									\
-	while (pages > 0) {						\
+	while (__flush_pages > 0) {					\
 		if (!system_supports_tlb_range() ||			\
-		    pages == 1 ||					\
-		    (lpa2 && start != ALIGN(start, SZ_64K))) {		\
-			addr = __TLBI_VADDR(start, asid);		\
+		    __flush_pages == 1 ||				\
+		    (lpa2 && __flush_start != ALIGN(__flush_start, SZ_64K))) {	\
+			addr = __TLBI_VADDR(__flush_start, asid);	\
 			__tlbi_level(op, addr, tlb_level);		\
 			if (tlbi_user)					\
 				__tlbi_user_level(op, addr, tlb_level);	\
-			start += stride;				\
-			pages -= stride >> PAGE_SHIFT;			\
+			__flush_start += stride;			\
+			__flush_pages -= stride >> PAGE_SHIFT;		\
 			continue;					\
 		}							\
 									\
-		num = __TLBI_RANGE_NUM(pages, scale);			\
+		num = __TLBI_RANGE_NUM(__flush_pages, scale);		\
 		if (num >= 0) {						\
-			addr = __TLBI_VADDR_RANGE(start >> shift, asid,	\
+			addr = __TLBI_VADDR_RANGE(__flush_start >> shift, asid, \
 					scale, num, tlb_level);		\
 			__tlbi(r##op, addr);				\
 			if (tlbi_user)					\
 				__tlbi_user(r##op, addr);		\
-			start += __TLBI_RANGE_PAGES(num, scale) << PAGE_SHIFT;	\
-			pages -= __TLBI_RANGE_PAGES(num, scale);	\
+			__flush_start += __TLBI_RANGE_PAGES(num, scale) << PAGE_SHIFT;	\
+			__flush_pages -= __TLBI_RANGE_PAGES(num, scale);\
 		}							\
 		scale--;						\
 	}								\
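The change copies the macro arguments into local __flush_start/__flush_pages variables and loops on those instead. Below is a minimal standalone sketch (not the arm64 code, and all names in it are illustrative) of the hazard this pattern avoids: a macro that writes back to its arguments silently clobbers the caller's variables, and cannot be passed an expression argument at all.

/*
 * Illustrative userspace sketch of "modify the argument" vs.
 * "work on a typeof() local copy" in a statement macro.
 * PAGE_SHIFT/PAGE_SIZE and both macro names are made up for this example.
 * Build with gcc (typeof is a GNU extension, as in the kernel).
 */
#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)

/* Old pattern: the macro consumes its own arguments. */
#define flush_loop_modifies_args(start, pages, stride)			\
do {									\
	while ((pages) > 0) {						\
		/* ... a TLBI for "start" would be issued here ... */	\
		(start) += (stride);					\
		(pages) -= (stride) >> PAGE_SHIFT;			\
	}								\
} while (0)

/* New pattern: loop on local copies, leave the arguments untouched. */
#define flush_loop_local_copies(start, pages, stride)			\
do {									\
	typeof(start) __flush_start = (start);				\
	typeof(pages) __flush_pages = (pages);				\
									\
	while (__flush_pages > 0) {					\
		/* ... a TLBI for "__flush_start" here ... */		\
		__flush_start += (stride);				\
		__flush_pages -= (stride) >> PAGE_SHIFT;		\
	}								\
} while (0)

int main(void)
{
	unsigned long start = 0x400000;
	long pages = 4;

	flush_loop_modifies_args(start, pages, PAGE_SIZE);
	/* Caller's variables were advanced: start=0x404000, pages=0. */
	printf("old macro: start=%#lx pages=%ld\n", start, pages);

	start = 0x400000;
	pages = 4;
	flush_loop_local_copies(start, pages, PAGE_SIZE);
	/* Caller's variables are unchanged: start=0x400000, pages=4. */
	printf("new macro: start=%#lx pages=%ld\n", start, pages);
	return 0;
}

With the local-copy form, a caller may also pass expressions (e.g. a base address plus an offset) as the start/pages arguments, which the write-back form would reject at compile time.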