@@ -415,13 +415,20 @@ static void domain_dirty_limits(struct dirty_throttle_control *dtc)
 	else
 		bg_thresh = (bg_ratio * available_memory) / PAGE_SIZE;
 
-	if (bg_thresh >= thresh)
-		bg_thresh = thresh / 2;
 	tsk = current;
 	if (rt_task(tsk)) {
 		bg_thresh += bg_thresh / 4 + global_wb_domain.dirty_limit / 32;
 		thresh += thresh / 4 + global_wb_domain.dirty_limit / 32;
 	}
+	/*
+	 * Dirty throttling logic assumes the limits in page units fit into
+	 * 32-bits. This gives 16TB dirty limits max which is hopefully enough.
+	 */
+	if (thresh > UINT_MAX)
+		thresh = UINT_MAX;
+	/* This makes sure bg_thresh is within 32-bits as well */
+	if (bg_thresh >= thresh)
+		bg_thresh = thresh / 2;
 	dtc->thresh = thresh;
 	dtc->bg_thresh = bg_thresh;
 
@@ -471,7 +478,11 @@ static unsigned long node_dirty_limit(struct pglist_data *pgdat)
 	if (rt_task(tsk))
 		dirty += dirty / 4;
 
-	return dirty;
+	/*
+	 * Dirty throttling logic assumes the limits in page units fit into
+	 * 32-bits. This gives 16TB dirty limits max which is hopefully enough.
+	 */
+	return min_t(unsigned long, dirty, UINT_MAX);
 }
 
 /**
@@ -508,10 +519,17 @@ static int dirty_background_bytes_handler(struct ctl_table *table, int write,
 		void *buffer, size_t *lenp, loff_t *ppos)
 {
 	int ret;
+	unsigned long old_bytes = dirty_background_bytes;
 
 	ret = proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
-	if (ret == 0 && write)
+	if (ret == 0 && write) {
+		if (DIV_ROUND_UP(dirty_background_bytes, PAGE_SIZE) >
+				UINT_MAX) {
+			dirty_background_bytes = old_bytes;
+			return -ERANGE;
+		}
 		dirty_background_ratio = 0;
+	}
 	return ret;
 }
 
@@ -537,6 +555,10 @@ static int dirty_bytes_handler(struct ctl_table *table, int write,
 
 	ret = proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
 	if (ret == 0 && write && vm_dirty_bytes != old_bytes) {
+		if (DIV_ROUND_UP(vm_dirty_bytes, PAGE_SIZE) > UINT_MAX) {
+			vm_dirty_bytes = old_bytes;
+			return -ERANGE;
+		}
 		writeback_set_ratelimit();
 		vm_dirty_ratio = 0;
 	}
@@ -1638,7 +1660,7 @@ static inline void wb_dirty_limits(struct dirty_throttle_control *dtc)
 	 */
 	dtc->wb_thresh = __wb_calc_thresh(dtc);
 	dtc->wb_bg_thresh = dtc->thresh ?
-		div64_u64(dtc->wb_thresh * dtc->bg_thresh, dtc->thresh) : 0;
+		div_u64((u64)dtc->wb_thresh * dtc->bg_thresh, dtc->thresh) : 0;
 
 	/*
 	 * In order to avoid the stacked BDI deadlock we need
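
Note on the div_u64() change in the last hunk: below is a standalone userspace sketch, not kernel code, of why the 32-bit clamp added in domain_dirty_limits() makes the cheaper 64-by-32 division safe; div_u64_sketch() is an illustrative stand-in for the kernel's div_u64(), and wb_thresh is simply taken at the 32-bit ceiling here as a worst case. Once thresh and bg_thresh are capped at UINT_MAX, both factors of the product fit in 32 bits, so the 64-bit multiplication cannot overflow and the divisor fits in 32 bits.

/* Standalone sketch, not kernel code: div_u64_sketch() mimics div_u64(),
 * a 64-bit dividend divided by a 32-bit divisor. */
#include <limits.h>
#include <stdint.h>
#include <stdio.h>

static uint64_t div_u64_sketch(uint64_t dividend, uint32_t divisor)
{
	return dividend / divisor;
}

int main(void)
{
	/* Worst case after the clamp: limits at the 32-bit ceiling. */
	uint64_t thresh = UINT_MAX;
	uint64_t bg_thresh = thresh / 2;	/* bg_thresh < thresh after the clamp */
	uint64_t wb_thresh = UINT_MAX;		/* assumed worst case for illustration */

	/* Both factors fit in 32 bits, so the 64-bit product cannot overflow. */
	uint64_t product = wb_thresh * bg_thresh;
	uint64_t wb_bg_thresh = div_u64_sketch(product, (uint32_t)thresh);

	printf("product      = %llu\n", (unsigned long long)product);
	printf("wb_bg_thresh = %llu\n", (unsigned long long)wb_bg_thresh);
	return 0;
}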
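
Note on the sysctl handler hunks: below is a standalone userspace sketch, not kernel code, of the bound they enforce, assuming 4 KiB pages; PAGE_SIZE_SKETCH and check_dirty_bytes() are illustrative names. A write to dirty_bytes or dirty_background_bytes is now rejected with -ERANGE once it would need more than UINT_MAX pages, i.e. anything above roughly 16 TiB, matching the "16TB dirty limits max" comment in the patch.

/* Standalone sketch, not kernel code: the bound enforced by the updated
 * dirty_bytes/dirty_background_bytes handlers, assuming 4 KiB pages. */
#include <errno.h>
#include <limits.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE_SKETCH	4096ULL		/* illustrative; the kernel uses PAGE_SIZE */
#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

/* Returns 0 if the byte value would be accepted, -ERANGE if rejected. */
static int check_dirty_bytes(uint64_t bytes)
{
	return DIV_ROUND_UP(bytes, PAGE_SIZE_SKETCH) > UINT_MAX ? -ERANGE : 0;
}

int main(void)
{
	uint64_t max_ok = (uint64_t)UINT_MAX * PAGE_SIZE_SKETCH;	/* ~16 TiB */

	printf("%llu bytes -> %d\n", (unsigned long long)max_ok,
	       check_dirty_bytes(max_ok));		/* accepted */
	printf("%llu bytes -> %d\n", (unsigned long long)(max_ok + 1),
	       check_dirty_bytes(max_ok + 1));		/* rejected with -ERANGE */
	return 0;
}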