@@ -176,6 +176,9 @@ static void blk_validate_atomic_write_limits(struct queue_limits *lim)
 {
 	unsigned int boundary_sectors;
 
+	if (!(lim->features & BLK_FEAT_ATOMIC_WRITES))
+		goto unsupported;
+
 	if (!lim->atomic_write_hw_max)
 		goto unsupported;
 
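The new check gates everything else in this function: a driver now has to opt in via the feature flag before its atomic-write limits are validated or honoured. As a rough sketch of the driver side (the queue_limits fields and BLK_FEAT_ATOMIC_WRITES are real kernel names; the function name and the values are hypothetical):

	/* hypothetical driver hook, sketch only; values are made up */
	static void foo_set_atomic_limits(struct queue_limits *lim)
	{
		lim->features |= BLK_FEAT_ATOMIC_WRITES;	/* opt in */
		lim->atomic_write_hw_unit_min = SZ_4K;		/* smallest atomic unit */
		lim->atomic_write_hw_unit_max = SZ_64K;		/* largest atomic unit */
		lim->atomic_write_hw_max = SZ_64K;		/* max atomic write length */
		lim->atomic_write_hw_boundary = 0;		/* no boundary restriction */
	}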
@@ -504,6 +507,121 @@ static unsigned int blk_round_down_sectors(unsigned int sectors, unsigned int lb
 	return sectors;
 }
 
+/* Check if second and later bottom devices are compliant */
+static bool blk_stack_atomic_writes_tail(struct queue_limits *t,
+				struct queue_limits *b)
+{
+	/* We're not going to support different boundary sizes.. yet */
+	if (t->atomic_write_hw_boundary != b->atomic_write_hw_boundary)
+		return false;
+
+	/* Can't support this */
+	if (t->atomic_write_hw_unit_min > b->atomic_write_hw_unit_max)
+		return false;
+
+	/* Or this */
+	if (t->atomic_write_hw_unit_max < b->atomic_write_hw_unit_min)
+		return false;
+
+	t->atomic_write_hw_max = min(t->atomic_write_hw_max,
+				b->atomic_write_hw_max);
+	t->atomic_write_hw_unit_min = max(t->atomic_write_hw_unit_min,
+				b->atomic_write_hw_unit_min);
+	t->atomic_write_hw_unit_max = min(t->atomic_write_hw_unit_max,
+				b->atomic_write_hw_unit_max);
+	return true;
+}
+
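The merge above is a straightforward intersection: boundaries must match exactly, the two unit ranges must overlap, and the surviving range is max-of-mins to min-of-maxes, with the overall maximum clamped to the smaller device. A standalone model of just that arithmetic (toy struct and made-up values, not kernel code):

	#include <stdbool.h>
	#include <stdio.h>

	/* toy mirror of the four stacked atomic-write fields */
	struct alim { unsigned int hw_max, unit_min, unit_max, boundary; };

	static bool stack_tail(struct alim *t, const struct alim *b)
	{
		if (t->boundary != b->boundary)
			return false;	/* differing boundary sizes: reject */
		if (t->unit_min > b->unit_max || t->unit_max < b->unit_min)
			return false;	/* unit ranges do not overlap */

		if (b->hw_max < t->hw_max)
			t->hw_max = b->hw_max;
		if (b->unit_min > t->unit_min)
			t->unit_min = b->unit_min;
		if (b->unit_max < t->unit_max)
			t->unit_max = b->unit_max;
		return true;
	}

	int main(void)
	{
		struct alim t = { 65536, 4096, 65536, 0 };	/* seeded by first leg */
		struct alim b = { 32768, 8192, 32768, 0 };	/* second leg */

		if (stack_tail(&t, &b))
			printf("hw_max=%u unit_min=%u unit_max=%u\n",
			       t.hw_max, t.unit_min, t.unit_max);
		return 0;	/* prints hw_max=32768 unit_min=8192 unit_max=32768 */
	}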
+/* Check for valid boundary of first bottom device */
+static bool blk_stack_atomic_writes_boundary_head(struct queue_limits *t,
+				struct queue_limits *b)
+{
+	/*
+	 * Ensure atomic write boundary is aligned with chunk sectors. Stacked
+	 * devices store chunk sectors in t->io_min.
+	 */
+	if (b->atomic_write_hw_boundary > t->io_min &&
+	    b->atomic_write_hw_boundary % t->io_min)
+		return false;
+	if (t->io_min > b->atomic_write_hw_boundary &&
+	    t->io_min % b->atomic_write_hw_boundary)
+		return false;
+
+	t->atomic_write_hw_boundary = b->atomic_write_hw_boundary;
+	return true;
+}
+
+
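Both conditions above encode a single rule: whichever of the boundary and the chunk size (t->io_min) is larger must be a whole multiple of the smaller, so stripe chunks and atomic-write boundaries never interleave at odd offsets. A standalone check with made-up values:

	#include <stdbool.h>
	#include <stdio.h>

	static bool boundary_ok(unsigned int boundary, unsigned int io_min)
	{
		if (boundary > io_min && boundary % io_min)
			return false;
		if (io_min > boundary && io_min % boundary)
			return false;
		return true;
	}

	int main(void)
	{
		printf("%d\n", boundary_ok(65536, 16384));	/* 1: 64K boundary, 16K chunks */
		printf("%d\n", boundary_ok(65536, 24576));	/* 0: 64K not a multiple of 24K */
		return 0;
	}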
+/* Check stacking of first bottom device */
+static bool blk_stack_atomic_writes_head(struct queue_limits *t,
+				struct queue_limits *b)
+{
+	if (b->atomic_write_hw_boundary &&
+	    !blk_stack_atomic_writes_boundary_head(t, b))
+		return false;
+
+	if (t->io_min <= SECTOR_SIZE) {
+		/* No chunk sectors, so use bottom device values directly */
+		t->atomic_write_hw_unit_max = b->atomic_write_hw_unit_max;
+		t->atomic_write_hw_unit_min = b->atomic_write_hw_unit_min;
+		t->atomic_write_hw_max = b->atomic_write_hw_max;
+		return true;
+	}
+
+	/*
+	 * Find values for limits which work for chunk size.
+	 * b->atomic_write_hw_unit_{min, max} may not be aligned with chunk
+	 * size (t->io_min), as chunk size is not restricted to a power-of-2.
+	 * So we need to find highest power-of-2 which works for the chunk
+	 * size.
+	 * As an example scenario, we could have b->unit_max = 16K and
+	 * t->io_min = 24K. For this case, reduce t->unit_max to a value
+	 * aligned with both limits, i.e. 8K in this example.
+	 */
+	t->atomic_write_hw_unit_max = b->atomic_write_hw_unit_max;
+	while (t->io_min % t->atomic_write_hw_unit_max)
+		t->atomic_write_hw_unit_max /= 2;
+
+	t->atomic_write_hw_unit_min = min(b->atomic_write_hw_unit_min,
+					t->atomic_write_hw_unit_max);
+	t->atomic_write_hw_max = min(b->atomic_write_hw_max, t->io_min);
+
+	return true;
+}
+
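The halving loop deserves a worked example. Taking the scenario from the comment, with b->atomic_write_hw_unit_max = 16K and t->io_min = 24K: 24K % 16K != 0, so the unit drops to 8K, and 24K % 8K == 0 stops the loop. Since the unit is a power of 2 and repeated halving eventually reaches a divisor of any positive chunk size, the loop always terminates. A standalone reproduction (made-up values):

	#include <stdio.h>

	int main(void)
	{
		unsigned int unit_max = 16384;	/* b->atomic_write_hw_unit_max: 16K */
		unsigned int io_min = 24576;	/* chunk size: 24K, not a power of 2 */

		/* halve the power-of-2 unit until it divides the chunk size */
		while (io_min % unit_max)
			unit_max /= 2;

		printf("unit_max=%u\n", unit_max);	/* prints 8192 */
		return 0;
	}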
+static void blk_stack_atomic_writes_limits(struct queue_limits *t,
+				struct queue_limits *b, sector_t start)
+{
+	if (!(b->features & BLK_FEAT_ATOMIC_WRITES))
+		goto unsupported;
+
+	if (!b->atomic_write_unit_min)
+		goto unsupported;
+
+	if (!blk_atomic_write_start_sect_aligned(start, b))
+		goto unsupported;
+
+	/*
+	 * If atomic_write_hw_max is set, we have already stacked 1x bottom
+	 * device, so check for compliance.
+	 */
+	if (t->atomic_write_hw_max) {
+		if (!blk_stack_atomic_writes_tail(t, b))
+			goto unsupported;
+		return;
+	}
+
+	if (!blk_stack_atomic_writes_head(t, b))
+		goto unsupported;
+	return;
+
+unsupported:
+	t->atomic_write_hw_max = 0;
+	t->atomic_write_hw_unit_max = 0;
+	t->atomic_write_hw_unit_min = 0;
+	t->atomic_write_hw_boundary = 0;
+}
+
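Stacking drivers feed bottom devices in one at a time, so the first call sees t->atomic_write_hw_max == 0 and takes the head path, while every later call takes the tail path; any failure falls through to the unsupported label, which zeroes all four hw fields. A compact model of that dispatch (toy struct again; head and tail are stand-ins, with tail reduced to the compliance checks since the merging was modelled earlier):

	#include <stdbool.h>
	#include <stdio.h>

	struct alim { unsigned int hw_max, unit_min, unit_max, boundary; };

	static bool head(struct alim *t, const struct alim *b)
	{
		*t = *b;	/* first leg simply seeds the limits */
		return true;
	}

	static bool tail(struct alim *t, const struct alim *b)
	{
		return t->boundary == b->boundary &&
		       t->unit_min <= b->unit_max && t->unit_max >= b->unit_min;
	}

	static void stack(struct alim *t, const struct alim *b)
	{
		bool ok = t->hw_max ? tail(t, b) : head(t, b);

		if (!ok)
			*t = (struct alim){ 0 };	/* unsupported: clear the limits */
	}

	int main(void)
	{
		struct alim t = { 0 };	/* nothing stacked yet: head path first */
		struct alim legs[] = {
			{ 65536, 4096, 65536, 0 },
			{ 32768, 8192, 32768, 4096 },	/* mismatched boundary */
		};

		for (int i = 0; i < 2; i++)
			stack(&t, &legs[i]);
		printf("hw_max=%u\n", t.hw_max);	/* prints hw_max=0: stack rejected */
		return 0;
	}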
 /**
  * blk_stack_limits - adjust queue_limits for stacked devices
  * @t:	the stacking driver limits (top device)
@@ -664,6 +782,8 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
 		t->zone_write_granularity = 0;
 		t->max_zone_append_sectors = 0;
 	}
+	blk_stack_atomic_writes_limits(t, b, start);
+
 	return ret;
 }
 EXPORT_SYMBOL(blk_stack_limits);
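Because the hook runs unconditionally at the tail of blk_stack_limits(), stacked drivers such as DM and MD pick the behaviour up without any changes of their own: one call per bottom device, head path first, tail path after. Roughly (kernel-style fragment, sketch only; leg0_limits, leg1_limits and stripe_sectors are hypothetical):

	struct queue_limits t;

	blk_set_stacking_limits(&t);		/* leaves t.atomic_write_hw_max == 0 */
	blk_stack_limits(&t, &leg0_limits, 0);	/* head path seeds the limits */
	blk_stack_limits(&t, &leg1_limits, stripe_sectors);	/* tail path narrows or rejects */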