Skip to content

Commit 669038c

Browse files
committed
block: Support atomic writes limits for stacked devices
JIRA: https://issues.redhat.com/browse/RHEL-73514

Allow stacked devices to support atomic writes by aggregating the minimum
capability of all bottom devices.

Flag BLK_FEAT_ATOMIC_WRITES_STACKED is set for stacked devices which have
been enabled to support atomic writes.

Some things to note on the implementation:
- For simplicity, all bottom devices must have the same atomic write
  boundary value (if any)
- The atomic write boundary must be a power-of-2 already, but this
  restriction could be relaxed. Furthermore, it is now required that the
  chunk sectors for a top device must be aligned with this boundary.
- If a bottom device atomic write unit min/max are not aligned with the
  top device chunk sectors, the top device atomic write unit min/max are
  reduced to a value which works for the chunk sectors.

Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: John Garry <john.g.garry@oracle.com>
Reviewed-by: Martin K. Petersen <martin.petersen@oracle.com>
Link: https://lore.kernel.org/r/20241118105018.1870052-3-john.g.garry@oracle.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
(cherry picked from commit d7f36dc)
Signed-off-by: Nigel Croxon <ncroxon@redhat.com>
1 parent 15bf6a4 commit 669038c

File tree

2 files changed

+119
-0
lines changed

2 files changed

+119
-0
lines changed

block/blk-settings.c

Lines changed: 115 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -504,6 +504,119 @@ static unsigned int blk_round_down_sectors(unsigned int sectors, unsigned int lb
504504
return sectors;
505505
}
506506

507+
/* Check if second and later bottom devices are compliant */
508+
static bool blk_stack_atomic_writes_tail(struct queue_limits *t,
509+
struct queue_limits *b)
510+
{
511+
/* We're not going to support different boundary sizes.. yet */
512+
if (t->atomic_write_hw_boundary != b->atomic_write_hw_boundary)
513+
return false;
514+
515+
/* Can't support this */
516+
if (t->atomic_write_hw_unit_min > b->atomic_write_hw_unit_max)
517+
return false;
518+
519+
/* Or this */
520+
if (t->atomic_write_hw_unit_max < b->atomic_write_hw_unit_min)
521+
return false;
522+
523+
t->atomic_write_hw_max = min(t->atomic_write_hw_max,
524+
b->atomic_write_hw_max);
525+
t->atomic_write_hw_unit_min = max(t->atomic_write_hw_unit_min,
526+
b->atomic_write_hw_unit_min);
527+
t->atomic_write_hw_unit_max = min(t->atomic_write_hw_unit_max,
528+
b->atomic_write_hw_unit_max);
529+
return true;
530+
}
531+
532+
/* Check for valid boundary of first bottom device */
533+
static bool blk_stack_atomic_writes_boundary_head(struct queue_limits *t,
534+
struct queue_limits *b)
535+
{
536+
/*
537+
* Ensure atomic write boundary is aligned with chunk sectors. Stacked
538+
* devices store chunk sectors in t->io_min.
539+
*/
540+
if (b->atomic_write_hw_boundary > t->io_min &&
541+
b->atomic_write_hw_boundary % t->io_min)
542+
return false;
543+
if (t->io_min > b->atomic_write_hw_boundary &&
544+
t->io_min % b->atomic_write_hw_boundary)
545+
return false;
546+
547+
t->atomic_write_hw_boundary = b->atomic_write_hw_boundary;
548+
return true;
549+
}
550+
551+
552+
/* Check stacking of first bottom device */
553+
static bool blk_stack_atomic_writes_head(struct queue_limits *t,
554+
struct queue_limits *b)
555+
{
556+
if (b->atomic_write_hw_boundary &&
557+
!blk_stack_atomic_writes_boundary_head(t, b))
558+
return false;
559+
560+
if (t->io_min <= SECTOR_SIZE) {
561+
/* No chunk sectors, so use bottom device values directly */
562+
t->atomic_write_hw_unit_max = b->atomic_write_hw_unit_max;
563+
t->atomic_write_hw_unit_min = b->atomic_write_hw_unit_min;
564+
t->atomic_write_hw_max = b->atomic_write_hw_max;
565+
return true;
566+
}
567+
568+
/*
569+
* Find values for limits which work for chunk size.
570+
* b->atomic_write_hw_unit_{min, max} may not be aligned with chunk
571+
* size (t->io_min), as chunk size is not restricted to a power-of-2.
572+
* So we need to find highest power-of-2 which works for the chunk
573+
* size.
574+
* As an example scenario, we could have b->unit_max = 16K and
575+
* t->io_min = 24K. For this case, reduce t->unit_max to a value
576+
* aligned with both limits, i.e. 8K in this example.
577+
*/
578+
t->atomic_write_hw_unit_max = b->atomic_write_hw_unit_max;
579+
while (t->io_min % t->atomic_write_hw_unit_max)
580+
t->atomic_write_hw_unit_max /= 2;
581+
582+
t->atomic_write_hw_unit_min = min(b->atomic_write_hw_unit_min,
583+
t->atomic_write_hw_unit_max);
584+
t->atomic_write_hw_max = min(b->atomic_write_hw_max, t->io_min);
585+
586+
return true;
587+
}
588+
589+
static void blk_stack_atomic_writes_limits(struct queue_limits *t,
590+
struct queue_limits *b)
591+
{
592+
if (!(t->features & BLK_FEAT_ATOMIC_WRITES_STACKED))
593+
goto unsupported;
594+
595+
if (!b->atomic_write_unit_min)
596+
goto unsupported;
597+
598+
/*
599+
* If atomic_write_hw_max is set, we have already stacked 1x bottom
600+
* device, so check for compliance.
601+
*/
602+
if (t->atomic_write_hw_max) {
603+
if (!blk_stack_atomic_writes_tail(t, b))
604+
goto unsupported;
605+
return;
606+
}
607+
608+
if (!blk_stack_atomic_writes_head(t, b))
609+
goto unsupported;
610+
return;
611+
612+
unsupported:
613+
t->atomic_write_hw_max = 0;
614+
t->atomic_write_hw_unit_max = 0;
615+
t->atomic_write_hw_unit_min = 0;
616+
t->atomic_write_hw_boundary = 0;
617+
t->features &= ~BLK_FEAT_ATOMIC_WRITES_STACKED;
618+
}
619+
507620
/**
508621
* blk_stack_limits - adjust queue_limits for stacked devices
509622
* @t: the stacking driver limits (top device)
@@ -664,6 +777,8 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
664777
t->zone_write_granularity = 0;
665778
t->max_zone_append_sectors = 0;
666779
}
780+
blk_stack_atomic_writes_limits(t, b);
781+
667782
return ret;
668783
}
669784
EXPORT_SYMBOL(blk_stack_limits);

include/linux/blkdev.h

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -338,6 +338,10 @@ typedef unsigned int __bitwise blk_features_t;
338338
#define BLK_FEAT_RAID_PARTIAL_STRIPES_EXPENSIVE \
339339
((__force blk_features_t)(1u << 15))
340340

341+
/*
 * Stacked device can/does support atomic writes; cleared again if any
 * bottom device turns out to be incompatible (see
 * blk_stack_atomic_writes_limits()).
 */
#define BLK_FEAT_ATOMIC_WRITES_STACKED \
	((__force blk_features_t)(1u << 16))
344+
341345
/*
342346
* Flags automatically inherited when stacking limits.
343347
*/

0 commit comments

Comments
 (0)