
Commit d2b2fea

Merge tag 'asm-generic-6.18' of git://git.kernel.org/pub/scm/linux/kernel/git/arnd/asm-generic
Pull asm-generic updates from Arnd Bergmann:
 "Two small patches for the asm-generic header files:

  Varad Gautam improves the MMIO tracing to be faster when the
  tracepoints are built into the kernel but disabled, while Qi Xi
  updates the DO_ONCE logic so that clearing the WARN_ONCE() flags does
  not change the other DO_ONCE users"

* tag 'asm-generic-6.18' of git://git.kernel.org/pub/scm/linux/kernel/git/arnd/asm-generic:
  once: fix race by moving DO_ONCE to separate section
  asm-generic/io.h: Skip trace helpers if rwmmio events are disabled
2 parents: 42cbaee + edcc8a3

3 files changed, +69 -34 lines

include/asm-generic/io.h

Lines changed: 66 additions & 32 deletions
@@ -75,6 +75,7 @@
 #if IS_ENABLED(CONFIG_TRACE_MMIO_ACCESS) && !(defined(__DISABLE_TRACE_MMIO__))
 #include <linux/tracepoint-defs.h>
 
+#define rwmmio_tracepoint_enabled(tracepoint) tracepoint_enabled(tracepoint)
 DECLARE_TRACEPOINT(rwmmio_write);
 DECLARE_TRACEPOINT(rwmmio_post_write);
 DECLARE_TRACEPOINT(rwmmio_read);
@@ -91,6 +92,7 @@ void log_post_read_mmio(u64 val, u8 width, const volatile void __iomem *addr,
 
 #else
 
+#define rwmmio_tracepoint_enabled(tracepoint) false
 static inline void log_write_mmio(u64 val, u8 width, volatile void __iomem *addr,
                                   unsigned long caller_addr, unsigned long caller_addr0) {}
 static inline void log_post_write_mmio(u64 val, u8 width, volatile void __iomem *addr,
@@ -189,11 +191,13 @@ static inline u8 readb(const volatile void __iomem *addr)
 {
         u8 val;
 
-        log_read_mmio(8, addr, _THIS_IP_, _RET_IP_);
+        if (rwmmio_tracepoint_enabled(rwmmio_read))
+                log_read_mmio(8, addr, _THIS_IP_, _RET_IP_);
         __io_br();
         val = __raw_readb(addr);
         __io_ar(val);
-        log_post_read_mmio(val, 8, addr, _THIS_IP_, _RET_IP_);
+        if (rwmmio_tracepoint_enabled(rwmmio_post_read))
+                log_post_read_mmio(val, 8, addr, _THIS_IP_, _RET_IP_);
         return val;
 }
 #endif
@@ -204,11 +208,13 @@ static inline u16 readw(const volatile void __iomem *addr)
 {
         u16 val;
 
-        log_read_mmio(16, addr, _THIS_IP_, _RET_IP_);
+        if (rwmmio_tracepoint_enabled(rwmmio_read))
+                log_read_mmio(16, addr, _THIS_IP_, _RET_IP_);
         __io_br();
         val = __le16_to_cpu((__le16 __force)__raw_readw(addr));
         __io_ar(val);
-        log_post_read_mmio(val, 16, addr, _THIS_IP_, _RET_IP_);
+        if (rwmmio_tracepoint_enabled(rwmmio_post_read))
+                log_post_read_mmio(val, 16, addr, _THIS_IP_, _RET_IP_);
         return val;
 }
 #endif
@@ -219,11 +225,13 @@ static inline u32 readl(const volatile void __iomem *addr)
 {
         u32 val;
 
-        log_read_mmio(32, addr, _THIS_IP_, _RET_IP_);
+        if (rwmmio_tracepoint_enabled(rwmmio_read))
+                log_read_mmio(32, addr, _THIS_IP_, _RET_IP_);
         __io_br();
         val = __le32_to_cpu((__le32 __force)__raw_readl(addr));
         __io_ar(val);
-        log_post_read_mmio(val, 32, addr, _THIS_IP_, _RET_IP_);
+        if (rwmmio_tracepoint_enabled(rwmmio_post_read))
+                log_post_read_mmio(val, 32, addr, _THIS_IP_, _RET_IP_);
         return val;
 }
 #endif
@@ -235,11 +243,13 @@ static inline u64 readq(const volatile void __iomem *addr)
 {
         u64 val;
 
-        log_read_mmio(64, addr, _THIS_IP_, _RET_IP_);
+        if (rwmmio_tracepoint_enabled(rwmmio_read))
+                log_read_mmio(64, addr, _THIS_IP_, _RET_IP_);
         __io_br();
         val = __le64_to_cpu((__le64 __force)__raw_readq(addr));
         __io_ar(val);
-        log_post_read_mmio(val, 64, addr, _THIS_IP_, _RET_IP_);
+        if (rwmmio_tracepoint_enabled(rwmmio_post_read))
+                log_post_read_mmio(val, 64, addr, _THIS_IP_, _RET_IP_);
         return val;
 }
 #endif
@@ -249,35 +259,41 @@ static inline u64 readq(const volatile void __iomem *addr)
 #define writeb writeb
 static inline void writeb(u8 value, volatile void __iomem *addr)
 {
-        log_write_mmio(value, 8, addr, _THIS_IP_, _RET_IP_);
+        if (rwmmio_tracepoint_enabled(rwmmio_write))
+                log_write_mmio(value, 8, addr, _THIS_IP_, _RET_IP_);
         __io_bw();
         __raw_writeb(value, addr);
         __io_aw();
-        log_post_write_mmio(value, 8, addr, _THIS_IP_, _RET_IP_);
+        if (rwmmio_tracepoint_enabled(rwmmio_post_write))
+                log_post_write_mmio(value, 8, addr, _THIS_IP_, _RET_IP_);
 }
 #endif
 
 #ifndef writew
 #define writew writew
 static inline void writew(u16 value, volatile void __iomem *addr)
 {
-        log_write_mmio(value, 16, addr, _THIS_IP_, _RET_IP_);
+        if (rwmmio_tracepoint_enabled(rwmmio_write))
+                log_write_mmio(value, 16, addr, _THIS_IP_, _RET_IP_);
         __io_bw();
         __raw_writew((u16 __force)cpu_to_le16(value), addr);
         __io_aw();
-        log_post_write_mmio(value, 16, addr, _THIS_IP_, _RET_IP_);
+        if (rwmmio_tracepoint_enabled(rwmmio_post_write))
+                log_post_write_mmio(value, 16, addr, _THIS_IP_, _RET_IP_);
 }
 #endif
 
 #ifndef writel
 #define writel writel
 static inline void writel(u32 value, volatile void __iomem *addr)
 {
-        log_write_mmio(value, 32, addr, _THIS_IP_, _RET_IP_);
+        if (rwmmio_tracepoint_enabled(rwmmio_write))
+                log_write_mmio(value, 32, addr, _THIS_IP_, _RET_IP_);
         __io_bw();
         __raw_writel((u32 __force)__cpu_to_le32(value), addr);
         __io_aw();
-        log_post_write_mmio(value, 32, addr, _THIS_IP_, _RET_IP_);
+        if (rwmmio_tracepoint_enabled(rwmmio_post_write))
+                log_post_write_mmio(value, 32, addr, _THIS_IP_, _RET_IP_);
 }
 #endif
 
@@ -286,11 +302,13 @@ static inline void writel(u32 value, volatile void __iomem *addr)
 #define writeq writeq
 static inline void writeq(u64 value, volatile void __iomem *addr)
 {
-        log_write_mmio(value, 64, addr, _THIS_IP_, _RET_IP_);
+        if (rwmmio_tracepoint_enabled(rwmmio_write))
+                log_write_mmio(value, 64, addr, _THIS_IP_, _RET_IP_);
         __io_bw();
         __raw_writeq((u64 __force)__cpu_to_le64(value), addr);
         __io_aw();
-        log_post_write_mmio(value, 64, addr, _THIS_IP_, _RET_IP_);
+        if (rwmmio_tracepoint_enabled(rwmmio_post_write))
+                log_post_write_mmio(value, 64, addr, _THIS_IP_, _RET_IP_);
 }
 #endif
 #endif /* CONFIG_64BIT */
@@ -306,9 +324,11 @@ static inline u8 readb_relaxed(const volatile void __iomem *addr)
 {
         u8 val;
 
-        log_read_mmio(8, addr, _THIS_IP_, _RET_IP_);
+        if (rwmmio_tracepoint_enabled(rwmmio_read))
+                log_read_mmio(8, addr, _THIS_IP_, _RET_IP_);
         val = __raw_readb(addr);
-        log_post_read_mmio(val, 8, addr, _THIS_IP_, _RET_IP_);
+        if (rwmmio_tracepoint_enabled(rwmmio_post_read))
+                log_post_read_mmio(val, 8, addr, _THIS_IP_, _RET_IP_);
         return val;
 }
 #endif
@@ -319,9 +339,11 @@ static inline u16 readw_relaxed(const volatile void __iomem *addr)
 {
         u16 val;
 
-        log_read_mmio(16, addr, _THIS_IP_, _RET_IP_);
+        if (rwmmio_tracepoint_enabled(rwmmio_read))
+                log_read_mmio(16, addr, _THIS_IP_, _RET_IP_);
         val = __le16_to_cpu((__le16 __force)__raw_readw(addr));
-        log_post_read_mmio(val, 16, addr, _THIS_IP_, _RET_IP_);
+        if (rwmmio_tracepoint_enabled(rwmmio_post_read))
+                log_post_read_mmio(val, 16, addr, _THIS_IP_, _RET_IP_);
         return val;
 }
 #endif
@@ -332,9 +354,11 @@ static inline u32 readl_relaxed(const volatile void __iomem *addr)
 {
         u32 val;
 
-        log_read_mmio(32, addr, _THIS_IP_, _RET_IP_);
+        if (rwmmio_tracepoint_enabled(rwmmio_read))
+                log_read_mmio(32, addr, _THIS_IP_, _RET_IP_);
         val = __le32_to_cpu((__le32 __force)__raw_readl(addr));
-        log_post_read_mmio(val, 32, addr, _THIS_IP_, _RET_IP_);
+        if (rwmmio_tracepoint_enabled(rwmmio_post_read))
+                log_post_read_mmio(val, 32, addr, _THIS_IP_, _RET_IP_);
         return val;
 }
 #endif
@@ -345,9 +369,11 @@ static inline u64 readq_relaxed(const volatile void __iomem *addr)
 {
         u64 val;
 
-        log_read_mmio(64, addr, _THIS_IP_, _RET_IP_);
+        if (rwmmio_tracepoint_enabled(rwmmio_read))
+                log_read_mmio(64, addr, _THIS_IP_, _RET_IP_);
         val = __le64_to_cpu((__le64 __force)__raw_readq(addr));
-        log_post_read_mmio(val, 64, addr, _THIS_IP_, _RET_IP_);
+        if (rwmmio_tracepoint_enabled(rwmmio_post_read))
+                log_post_read_mmio(val, 64, addr, _THIS_IP_, _RET_IP_);
         return val;
 }
 #endif
@@ -356,39 +382,47 @@ static inline u64 readq_relaxed(const volatile void __iomem *addr)
 #define writeb_relaxed writeb_relaxed
 static inline void writeb_relaxed(u8 value, volatile void __iomem *addr)
 {
-        log_write_mmio(value, 8, addr, _THIS_IP_, _RET_IP_);
+        if (rwmmio_tracepoint_enabled(rwmmio_write))
+                log_write_mmio(value, 8, addr, _THIS_IP_, _RET_IP_);
         __raw_writeb(value, addr);
-        log_post_write_mmio(value, 8, addr, _THIS_IP_, _RET_IP_);
+        if (rwmmio_tracepoint_enabled(rwmmio_post_write))
+                log_post_write_mmio(value, 8, addr, _THIS_IP_, _RET_IP_);
 }
 #endif
 
 #ifndef writew_relaxed
 #define writew_relaxed writew_relaxed
 static inline void writew_relaxed(u16 value, volatile void __iomem *addr)
 {
-        log_write_mmio(value, 16, addr, _THIS_IP_, _RET_IP_);
+        if (rwmmio_tracepoint_enabled(rwmmio_write))
+                log_write_mmio(value, 16, addr, _THIS_IP_, _RET_IP_);
         __raw_writew((u16 __force)cpu_to_le16(value), addr);
-        log_post_write_mmio(value, 16, addr, _THIS_IP_, _RET_IP_);
+        if (rwmmio_tracepoint_enabled(rwmmio_post_write))
+                log_post_write_mmio(value, 16, addr, _THIS_IP_, _RET_IP_);
 }
 #endif
 
 #ifndef writel_relaxed
 #define writel_relaxed writel_relaxed
 static inline void writel_relaxed(u32 value, volatile void __iomem *addr)
 {
-        log_write_mmio(value, 32, addr, _THIS_IP_, _RET_IP_);
+        if (rwmmio_tracepoint_enabled(rwmmio_write))
+                log_write_mmio(value, 32, addr, _THIS_IP_, _RET_IP_);
         __raw_writel((u32 __force)__cpu_to_le32(value), addr);
-        log_post_write_mmio(value, 32, addr, _THIS_IP_, _RET_IP_);
+        if (rwmmio_tracepoint_enabled(rwmmio_post_write))
+                log_post_write_mmio(value, 32, addr, _THIS_IP_, _RET_IP_);
 }
 #endif
 
 #if defined(writeq) && !defined(writeq_relaxed)
 #define writeq_relaxed writeq_relaxed
 static inline void writeq_relaxed(u64 value, volatile void __iomem *addr)
 {
-        log_write_mmio(value, 64, addr, _THIS_IP_, _RET_IP_);
+        if (rwmmio_tracepoint_enabled(rwmmio_write))
+                log_write_mmio(value, 64, addr, _THIS_IP_, _RET_IP_);
         __raw_writeq((u64 __force)__cpu_to_le64(value), addr);
-        log_post_write_mmio(value, 64, addr, _THIS_IP_, _RET_IP_);
+        if (rwmmio_tracepoint_enabled(rwmmio_post_write))
+                log_post_write_mmio(value, 64, addr, _THIS_IP_, _RET_IP_);
 }
 #endif
 
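The gate added above means every accessor now tests a tracepoint static key before calling into the out-of-line log_*_mmio() helpers, so a kernel built with CONFIG_TRACE_MMIO_ACCESS=y pays only a patched-out branch while the rwmmio events are disabled; when tracing is compiled out (or __DISABLE_TRACE_MMIO__ is set) the macro expands to false and the compiler drops the calls entirely. A minimal sketch of the same pattern, assuming a hypothetical my_event tracepoint and my_do_log() helper (DECLARE_TRACEPOINT() and tracepoint_enabled() are the existing helpers from <linux/tracepoint-defs.h>; everything else here is illustrative and not part of this patch):

#include <linux/tracepoint-defs.h>

/* Hypothetical event; the matching trace event definition would live in a
 * .c file elsewhere. */
DECLARE_TRACEPOINT(my_event);

void my_do_log(void);   /* hypothetical out-of-line logging helper */

static inline void my_accessor(void)
{
        /*
         * tracepoint_enabled() is a static-key test: while the event is
         * disabled the branch is patched out, so the out-of-line call
         * below is skipped on the fast path.
         */
        if (tracepoint_enabled(my_event))
                my_do_log();

        /* ...the actual MMIO access would go here... */
}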
include/asm-generic/vmlinux.lds.h

Lines changed: 1 addition & 0 deletions
@@ -361,6 +361,7 @@ defined(CONFIG_AUTOFDO_CLANG) || defined(CONFIG_PROPELLER_CLANG)
                 __start_once = .; \
                 *(.data..once) \
                 __end_once = .; \
+                *(.data..do_once) \
                 STRUCT_ALIGN(); \
                 *(__tracepoints) \
                 /* implement dynamic printk debug */ \
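The new *(.data..do_once) input section is placed after the __end_once marker on purpose: only the [__start_once, __end_once) range is wiped when the WARN_ONCE() flags are cleared, so DO_ONCE() state kept in the new section survives. A hedged sketch of that reset path, reconstructed from memory of kernel/panic.c rather than taken from this patch, shows why the boundary matters:

#include <linux/bug.h>
#include <linux/string.h>

extern char __start_once[], __end_once[];

/* Handler behind /sys/kernel/debug/clear_warn_once: re-arms WARN_ONCE()
 * and friends by zeroing .data..once only; flags in .data..do_once fall
 * outside this range and stay set. */
static int clear_warn_once_set(void *data, u64 val)
{
        generic_bug_clear_once();
        memset(__start_once, 0, __end_once - __start_once);
        return 0;
}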

include/linux/once.h

Lines changed: 2 additions & 2 deletions
@@ -46,7 +46,7 @@ void __do_once_sleepable_done(bool *done, struct static_key_true *once_key,
 #define DO_ONCE(func, ...) \
         ({ \
                 bool ___ret = false; \
-                static bool __section(".data..once") ___done = false; \
+                static bool __section(".data..do_once") ___done = false; \
                 static DEFINE_STATIC_KEY_TRUE(___once_key); \
                 if (static_branch_unlikely(&___once_key)) { \
                         unsigned long ___flags; \
@@ -64,7 +64,7 @@ void __do_once_sleepable_done(bool *done, struct static_key_true *once_key,
 #define DO_ONCE_SLEEPABLE(func, ...) \
         ({ \
                 bool ___ret = false; \
-                static bool __section(".data..once") ___done = false; \
+                static bool __section(".data..do_once") ___done = false; \
                 static DEFINE_STATIC_KEY_TRUE(___once_key); \
                 if (static_branch_unlikely(&___once_key)) { \
                         ___ret = __do_once_sleepable_start(&___done); \
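With ___done moved into .data..do_once, clearing the WARN_ONCE()/printk_once() flags no longer resets a DO_ONCE() call site's flag behind its already-disabled static key, which is the inconsistency the patch title calls a race. A hedged usage sketch, not taken from the patch (init_secret() and use_secret() are illustrative; DO_ONCE(), get_random_bytes() and the section names are the real ones):

#include <linux/once.h>
#include <linux/random.h>

static u32 secret;

static void init_secret(void)
{
        get_random_bytes(&secret, sizeof(secret));
}

void use_secret(void)
{
        /*
         * Runs init_secret() exactly once. The per-call-site ___done flag
         * now lives in .data..do_once, so a write to
         * /sys/kernel/debug/clear_warn_once resets only the WARN_ONCE()
         * state in .data..once and leaves this flag untouched.
         */
        DO_ONCE(init_secret);
}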
