Commit 02f3fa1
lib/sbitmap: define swap_lock as raw_spinlock_t
jira LE-4066
Rebuild_History Non-Buildable kernel-4.18.0-553.72.1.el8_10
commit-author Ming Lei <ming.lei@redhat.com>
commit 65f666c
Empty-Commit: Cherry-Pick Conflicts during history rebuild.
Will be included in final tarball splat. Ref for failed cherry-pick at:
ciq/ciq_backports/kernel-4.18.0-553.72.1.el8_10/65f666c6.failed

When called from sbitmap_queue_get(), sbitmap_deferred_clear() may be run
with preempt disabled. In RT kernel, spin_lock() can sleep, then warning
of "BUG: sleeping function called from invalid context" can be triggered.

Fix it by replacing it with raw_spin_lock.

Cc: Yang Yang <yang.yang@vivo.com>
Fixes: 72d04bd ("sbitmap: fix io hung due to race on sbitmap_word::cleared")
Signed-off-by: Ming Lei <ming.lei@redhat.com>
Reviewed-by: Yang Yang <yang.yang@vivo.com>
Link: https://lore.kernel.org/r/20240919021709.511329-1-ming.lei@redhat.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
(cherry picked from commit 65f666c)
Signed-off-by: Jonathan Maple <jmaple@ciq.com>

# Conflicts:
# include/linux/sbitmap.h
# lib/sbitmap.c
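For context on the fix being backported (this note is not part of the commit message): on PREEMPT_RT kernels, spinlock_t is backed by a sleeping rtmutex, so taking one on a path that already has preemption disabled, as sbitmap_queue_get() does around sbitmap_deferred_clear(), trips the "BUG: sleeping function called from invalid context" check; raw_spinlock_t always busy-waits and remains legal there. The sketch below illustrates the declaration and locking pattern the upstream commit switches to, using stand-in names (demo_word, demo_deferred_clear), not the actual sbitmap code:

#include <linux/atomic.h>
#include <linux/spinlock.h>

/* Illustrative stand-in for struct sbitmap_word; not the real layout. */
struct demo_word {
	unsigned long word;
	unsigned long cleared;
	raw_spinlock_t swap_lock;		/* was: spinlock_t swap_lock; */
};

static void demo_word_init(struct demo_word *map)
{
	raw_spin_lock_init(&map->swap_lock);	/* was: spin_lock_init() */
}

/* Safe even when the caller has disabled preemption, including on RT. */
static bool demo_deferred_clear(struct demo_word *map)
{
	unsigned long mask, flags;

	raw_spin_lock_irqsave(&map->swap_lock, flags);	/* was: spin_lock_irqsave() */
	mask = xchg(&map->cleared, 0);			/* grab and reset deferred bits */
	if (mask)
		atomic_long_andnot(mask, (atomic_long_t *)&map->word);
	raw_spin_unlock_irqrestore(&map->swap_lock, flags);

	return mask != 0;
}

The trade-off is that a raw spinlock section cannot be preempted even on RT, which is reasonable for a short critical section like this one that only folds ->cleared back into ->word.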
1 parent 8eea495 commit 02f3fa1

File tree

1 file changed: +130 -0 lines changed

Lines changed: 130 additions & 0 deletions
@@ -0,0 +1,130 @@
lib/sbitmap: define swap_lock as raw_spinlock_t

jira LE-4066
Rebuild_History Non-Buildable kernel-4.18.0-553.72.1.el8_10
commit-author Ming Lei <ming.lei@redhat.com>
commit 65f666c6203600053478ce8e34a1db269a8701c9
Empty-Commit: Cherry-Pick Conflicts during history rebuild.
Will be included in final tarball splat. Ref for failed cherry-pick at:
ciq/ciq_backports/kernel-4.18.0-553.72.1.el8_10/65f666c6.failed

When called from sbitmap_queue_get(), sbitmap_deferred_clear() may be run
with preempt disabled. In RT kernel, spin_lock() can sleep, then warning
of "BUG: sleeping function called from invalid context" can be triggered.

Fix it by replacing it with raw_spin_lock.

Cc: Yang Yang <yang.yang@vivo.com>
Fixes: 72d04bdcf3f7 ("sbitmap: fix io hung due to race on sbitmap_word::cleared")
Signed-off-by: Ming Lei <ming.lei@redhat.com>
Reviewed-by: Yang Yang <yang.yang@vivo.com>
Link: https://lore.kernel.org/r/20240919021709.511329-1-ming.lei@redhat.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
(cherry picked from commit 65f666c6203600053478ce8e34a1db269a8701c9)
Signed-off-by: Jonathan Maple <jmaple@ciq.com>

# Conflicts:
# include/linux/sbitmap.h
# lib/sbitmap.c
diff --cc include/linux/sbitmap.h
index c5364c7c3ae5,189140bf11fc..000000000000
--- a/include/linux/sbitmap.h
+++ b/include/linux/sbitmap.h
@@@ -40,9 -38,9 +40,13 @@@ struct sbitmap_word
unsigned long cleared ____cacheline_aligned_in_smp;

/**
- * @swap_lock: serializes simultaneous updates of ->word and ->cleared
+ * @swap_lock: Held while swapping word <-> cleared
*/
++<<<<<<< HEAD
+ RH_KABI_DEPRECATE(spinlock_t, swap_lock)
++=======
+ raw_spinlock_t swap_lock;
++>>>>>>> 65f666c62036 (lib/sbitmap: define swap_lock as raw_spinlock_t)
} ____cacheline_aligned_in_smp;

/**
diff --cc lib/sbitmap.c
index 59f985b64b05,d3412984170c..000000000000
--- a/lib/sbitmap.c
+++ b/lib/sbitmap.c
@@@ -76,12 -60,30 +76,34 @@@ static inline void update_alloc_hint_af
/*
* See if we have deferred clears that we can batch move
*/
-static inline bool sbitmap_deferred_clear(struct sbitmap_word *map,
- unsigned int depth, unsigned int alloc_hint, bool wrap)
+static inline bool sbitmap_deferred_clear(struct sbitmap_word *map)
{
- unsigned long mask, word_mask;
+ unsigned long mask;

++<<<<<<< HEAD
+ if (!READ_ONCE(map->cleared))
+ return false;
++=======
+ guard(raw_spinlock_irqsave)(&map->swap_lock);
+
+ if (!map->cleared) {
+ if (depth == 0)
+ return false;
+
+ word_mask = (~0UL) >> (BITS_PER_LONG - depth);
+ /*
+ * The current behavior is to always retry after moving
+ * ->cleared to word, and we change it to retry in case
+ * of any free bits. To avoid an infinite loop, we need
+ * to take wrap & alloc_hint into account, otherwise a
+ * soft lockup may occur.
+ */
+ if (!wrap && alloc_hint)
+ word_mask &= ~((1UL << alloc_hint) - 1);
+
+ return (READ_ONCE(map->word) & word_mask) != word_mask;
+ }
++>>>>>>> 65f666c62036 (lib/sbitmap: define swap_lock as raw_spinlock_t)

/*
* First get a stable cleared mask, setting the old mask to 0.
@@@ -119,24 -122,21 +141,29 @@@ int sbitmap_init_node(struct sbitmap *s
return 0;
}

- if (alloc_hint) {
- if (init_alloc_hint(sb, flags))
- return -ENOMEM;
- } else {
- sb->alloc_hint = NULL;
- }
-
- sb->map = kvzalloc_node(sb->map_nr * sizeof(*sb->map), flags, node);
+ /*
+ * RHEL8: sb->alloc_hint is kept in an additional sb->map array element prior
+ * to the actual sb->map due to KABI (it was not possible to add it to struct sbitmap)
+ */
+ sb->map = kvzalloc_node((sb->map_nr + 1) * sizeof(*sb->map), flags, node);
if (!sb->map) {
- free_percpu(sb->alloc_hint);
return -ENOMEM;
}
+ sb->map++;

++<<<<<<< HEAD
+ if (alloc_hint) {
+ if (init_alloc_hint(sb, flags)) {
+ kfree(--sb->map);
+ return -ENOMEM;
+ }
+ } else {
+ *SB_ALLOC_HINT_PTR(sb) = NULL;
+ }
++=======
+ for (i = 0; i < sb->map_nr; i++)
+ raw_spin_lock_init(&sb->map[i].swap_lock);
++>>>>>>> 65f666c62036 (lib/sbitmap: define swap_lock as raw_spinlock_t)

return 0;
}
* Unmerged path include/linux/sbitmap.h
* Unmerged path lib/sbitmap.c
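A note on the unmerged lib/sbitmap.c hunk above (not part of the .failed record): the incoming upstream side uses guard(raw_spinlock_irqsave)(&map->swap_lock), the scope-based lock guard from <linux/cleanup.h>/<linux/spinlock.h>, which acquires the raw spinlock with interrupts saved and drops it automatically at every exit from the enclosing scope, while the RHEL8 HEAD side has diverged (swap_lock is RH_KABI_DEPRECATE'd and the deferred-clear check is lockless). The sketch below shows the guard pattern next to its explicit equivalent; the function names are illustrative, not the resolved backport:

#include <linux/cleanup.h>
#include <linux/spinlock.h>

/* Scope-based form: the guard drops the lock automatically on every return. */
static bool demo_clear_scoped(raw_spinlock_t *lock, unsigned long *cleared)
{
	guard(raw_spinlock_irqsave)(lock);

	if (!*cleared)
		return false;		/* lock released here */
	*cleared = 0;
	return true;			/* ...and here */
}

/* Explicit equivalent with manual lock/unlock on each path. */
static bool demo_clear_explicit(raw_spinlock_t *lock, unsigned long *cleared)
{
	unsigned long flags;
	bool ret;

	raw_spin_lock_irqsave(lock, flags);
	ret = *cleared != 0;
	if (ret)
		*cleared = 0;
	raw_spin_unlock_irqrestore(lock, flags);

	return ret;
}

Whichever form an eventual resolution takes, the point of the upstream change carries over: the lock protecting the word/cleared swap must be a raw_spinlock_t so the path stays valid with preemption disabled.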
