Commit 2cec149

sbitmap: add __sbitmap_queue_get_batch()
jira LE-4066
Rebuild_History Non-Buildable kernel-4.18.0-553.72.1.el8_10
commit-author Jens Axboe <axboe@kernel.dk>
commit 9672b0d

The block layer tag allocation batching still calls into sbitmap to get each tag, but we can improve on that. Add __sbitmap_queue_get_batch(), which returns a mask of tags all at once, along with an offset for those tags. An example return would be 0xff, where bits 0..7 are set, with tag_offset == 128. The valid tags in this case would be 128..135.

A batch is specific to an individual sbitmap_map, hence it cannot be larger than that. The requested number of tags is automatically reduced to the max that can be satisfied with a single map.

On failure, 0 is returned. Caller should fall back to single tag allocation at that point.

Signed-off-by: Jens Axboe <axboe@kernel.dk>
(cherry picked from commit 9672b0d)
Signed-off-by: Jonathan Maple <jmaple@ciq.com>
1 parent 236f97d commit 2cec149

2 files changed, 64 insertions(+), 0 deletions(-)


include/linux/sbitmap.h

Lines changed: 13 additions & 0 deletions
@@ -462,6 +462,19 @@ void sbitmap_queue_resize(struct sbitmap_queue *sbq, unsigned int depth);
  */
 int __sbitmap_queue_get(struct sbitmap_queue *sbq);
 
+/**
+ * __sbitmap_queue_get_batch() - Try to allocate a batch of free bits
+ * @sbq: Bitmap queue to allocate from.
+ * @nr_tags: number of tags requested
+ * @offset: offset to add to returned bits
+ *
+ * Return: Mask of allocated tags, 0 if none are found. Each tag allocated is
+ * a bit in the mask returned, and the caller must add @offset to the value to
+ * get the absolute tag value.
+ */
+unsigned long __sbitmap_queue_get_batch(struct sbitmap_queue *sbq, int nr_tags,
+					unsigned int *offset);
+
 /**
  * __sbitmap_queue_get_shallow() - Try to allocate a free bit from a &struct
  * sbitmap_queue, limiting the depth used from each word, with preemption
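
As a usage illustration only (not part of this patch), here is a minimal sketch of how a caller might consume the new interface, following the contract in the kerneldoc above and the fallback the commit message describes. The function alloc_tags_example() and its surrounding context are hypothetical; it assumes a kernel build with <linux/sbitmap.h> and <linux/bitops.h> available.

	/* Illustrative sketch only: hypothetical caller, not from this patch. */
	static int alloc_tags_example(struct sbitmap_queue *sbq,
				      unsigned int *tags, int nr_tags)
	{
		unsigned int offset, bit;
		unsigned long mask;
		int i = 0;

		mask = __sbitmap_queue_get_batch(sbq, nr_tags, &offset);
		if (mask) {
			/*
			 * Each set bit is one allocated tag; adding @offset gives
			 * the absolute tag value. E.g. mask 0xff with offset 128
			 * yields tags 128..135, as in the commit message.
			 */
			for_each_set_bit(bit, &mask, BITS_PER_LONG)
				tags[i++] = offset + bit;
			return i;
		}

		/* Batch path returned 0: fall back to single tag allocation. */
		while (i < nr_tags) {
			int tag = __sbitmap_queue_get(sbq);

			if (tag < 0)
				break;
			tags[i++] = tag;
		}
		return i;
	}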

lib/sbitmap.c

Lines changed: 51 additions & 0 deletions
@@ -511,6 +511,57 @@ int __sbitmap_queue_get(struct sbitmap_queue *sbq)
 }
 EXPORT_SYMBOL_GPL(__sbitmap_queue_get);
 
+unsigned long __sbitmap_queue_get_batch(struct sbitmap_queue *sbq, int nr_tags,
+					unsigned int *offset)
+{
+	struct sbitmap *sb = &sbq->sb;
+	unsigned int hint, depth;
+	unsigned long index, nr;
+	int i;
+
+	if (unlikely(sb->round_robin))
+		return 0;
+
+	depth = READ_ONCE(sb->depth);
+	hint = update_alloc_hint_before_get(sb, depth);
+
+	index = SB_NR_TO_INDEX(sb, hint);
+
+	for (i = 0; i < sb->map_nr; i++) {
+		struct sbitmap_word *map = &sb->map[index];
+		unsigned long get_mask;
+
+		sbitmap_deferred_clear(map);
+		if (map->word == (1UL << (map->depth - 1)) - 1)
+			continue;
+
+		nr = find_first_zero_bit(&map->word, map->depth);
+		if (nr + nr_tags <= map->depth) {
+			atomic_long_t *ptr = (atomic_long_t *) &map->word;
+			int map_tags = min_t(int, nr_tags, map->depth);
+			unsigned long val, ret;
+
+			get_mask = ((1UL << map_tags) - 1) << nr;
+			do {
+				val = READ_ONCE(map->word);
+				ret = atomic_long_cmpxchg(ptr, val, get_mask | val);
+			} while (ret != val);
+			get_mask = (get_mask & ~ret) >> nr;
+			if (get_mask) {
+				*offset = nr + (index << sb->shift);
+				update_alloc_hint_after_get(sb, depth, hint,
+							*offset + map_tags - 1);
+				return get_mask;
+			}
+		}
+		/* Jump to next index. */
+		if (++index >= sb->map_nr)
+			index = 0;
+	}
+
+	return 0;
+}
+
 int __sbitmap_queue_get_shallow(struct sbitmap_queue *sbq,
 				unsigned int shallow_depth)
 {
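
One detail worth noting in the cmpxchg path above: the line get_mask = (get_mask & ~ret) >> nr means only bits that were actually free in the word when the swap succeeded are reported to the caller, so the returned batch can be smaller than the requested window and may even have holes. Below is a standalone userspace sketch of that arithmetic with made-up example values; it is not kernel code and not part of the patch.

	/* Standalone userspace illustration of the grant arithmetic (assumed values). */
	#include <stdio.h>

	int main(void)
	{
		unsigned long word = 0x4fUL;	/* bits 0..3 and bit 6 already allocated */
		unsigned long nr = 4;		/* find_first_zero_bit() would return 4 */
		int map_tags = 8;		/* request a window of 8 tags */
		unsigned long old = word;	/* value the successful cmpxchg swapped against */
		unsigned long get_mask = ((1UL << map_tags) - 1) << nr;	/* 0xff0 */

		word |= get_mask;			/* what the cmpxchg stores */
		get_mask = (get_mask & ~old) >> nr;	/* grant only the bits that were free */

		/* Prints 0xfb: window bit 2 (absolute bit 6) was taken, so it is excluded. */
		printf("granted mask: %#lx\n", get_mask);
		return 0;
	}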
