@@ -479,6 +479,119 @@ static unsigned int blk_round_down_sectors(unsigned int sectors, unsigned int lb
479479 return sectors ;
480480}
481481
482+ /* Check if second and later bottom devices are compliant */
483+ static bool blk_stack_atomic_writes_tail (struct queue_limits * t ,
484+ struct queue_limits * b )
485+ {
486+ /* We're not going to support different boundary sizes.. yet */
487+ if (t -> atomic_write_hw_boundary != b -> atomic_write_hw_boundary )
488+ return false;
489+
490+ /* Can't support this */
491+ if (t -> atomic_write_hw_unit_min > b -> atomic_write_hw_unit_max )
492+ return false;
493+
494+ /* Or this */
495+ if (t -> atomic_write_hw_unit_max < b -> atomic_write_hw_unit_min )
496+ return false;
497+
498+ t -> atomic_write_hw_max = min (t -> atomic_write_hw_max ,
499+ b -> atomic_write_hw_max );
500+ t -> atomic_write_hw_unit_min = max (t -> atomic_write_hw_unit_min ,
501+ b -> atomic_write_hw_unit_min );
502+ t -> atomic_write_hw_unit_max = min (t -> atomic_write_hw_unit_max ,
503+ b -> atomic_write_hw_unit_max );
504+ return true;
505+ }
506+
507+ /* Check for valid boundary of first bottom device */
508+ static bool blk_stack_atomic_writes_boundary_head (struct queue_limits * t ,
509+ struct queue_limits * b )
510+ {
511+ /*
512+ * Ensure atomic write boundary is aligned with chunk sectors. Stacked
513+ * devices store chunk sectors in t->io_min.
514+ */
515+ if (b -> atomic_write_hw_boundary > t -> io_min &&
516+ b -> atomic_write_hw_boundary % t -> io_min )
517+ return false;
518+ if (t -> io_min > b -> atomic_write_hw_boundary &&
519+ t -> io_min % b -> atomic_write_hw_boundary )
520+ return false;
521+
522+ t -> atomic_write_hw_boundary = b -> atomic_write_hw_boundary ;
523+ return true;
524+ }
525+
526+
527+ /* Check stacking of first bottom device */
528+ static bool blk_stack_atomic_writes_head (struct queue_limits * t ,
529+ struct queue_limits * b )
530+ {
531+ if (b -> atomic_write_hw_boundary &&
532+ !blk_stack_atomic_writes_boundary_head (t , b ))
533+ return false;
534+
535+ if (t -> io_min <= SECTOR_SIZE ) {
536+ /* No chunk sectors, so use bottom device values directly */
537+ t -> atomic_write_hw_unit_max = b -> atomic_write_hw_unit_max ;
538+ t -> atomic_write_hw_unit_min = b -> atomic_write_hw_unit_min ;
539+ t -> atomic_write_hw_max = b -> atomic_write_hw_max ;
540+ return true;
541+ }
542+
543+ /*
544+ * Find values for limits which work for chunk size.
545+ * b->atomic_write_hw_unit_{min, max} may not be aligned with chunk
546+ * size (t->io_min), as chunk size is not restricted to a power-of-2.
547+ * So we need to find highest power-of-2 which works for the chunk
548+ * size.
549+ * As an example scenario, we could have b->unit_max = 16K and
550+ * t->io_min = 24K. For this case, reduce t->unit_max to a value
551+ * aligned with both limits, i.e. 8K in this example.
552+ */
553+ t -> atomic_write_hw_unit_max = b -> atomic_write_hw_unit_max ;
554+ while (t -> io_min % t -> atomic_write_hw_unit_max )
555+ t -> atomic_write_hw_unit_max /= 2 ;
556+
557+ t -> atomic_write_hw_unit_min = min (b -> atomic_write_hw_unit_min ,
558+ t -> atomic_write_hw_unit_max );
559+ t -> atomic_write_hw_max = min (b -> atomic_write_hw_max , t -> io_min );
560+
561+ return true;
562+ }
563+
564+ static void blk_stack_atomic_writes_limits (struct queue_limits * t ,
565+ struct queue_limits * b )
566+ {
567+ if (!(t -> features & BLK_FEAT_ATOMIC_WRITES_STACKED ))
568+ goto unsupported ;
569+
570+ if (!b -> atomic_write_unit_min )
571+ goto unsupported ;
572+
573+ /*
574+ * If atomic_write_hw_max is set, we have already stacked 1x bottom
575+ * device, so check for compliance.
576+ */
577+ if (t -> atomic_write_hw_max ) {
578+ if (!blk_stack_atomic_writes_tail (t , b ))
579+ goto unsupported ;
580+ return ;
581+ }
582+
583+ if (!blk_stack_atomic_writes_head (t , b ))
584+ goto unsupported ;
585+ return ;
586+
587+ unsupported :
588+ t -> atomic_write_hw_max = 0 ;
589+ t -> atomic_write_hw_unit_max = 0 ;
590+ t -> atomic_write_hw_unit_min = 0 ;
591+ t -> atomic_write_hw_boundary = 0 ;
592+ t -> features &= ~BLK_FEAT_ATOMIC_WRITES_STACKED ;
593+ }
594+
482595/**
483596 * blk_stack_limits - adjust queue_limits for stacked devices
484597 * @t: the stacking driver limits (top device)
@@ -639,6 +752,8 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
639752 t -> zone_write_granularity = 0 ;
640753 t -> max_zone_append_sectors = 0 ;
641754 }
755+ blk_stack_atomic_writes_limits (t , b );
756+
642757 return ret ;
643758}
644759EXPORT_SYMBOL (blk_stack_limits );
0 commit comments