Skip to content

Commit bf357be

Browse files
committed
Merge: locking: Rebase locking code to v6.8
MR: https://gitlab.com/redhat/centos-stream/src/kernel/centos-stream-9/-/merge_requests/4190 JIRA: https://issues.redhat.com/browse/RHEL-35759 MR: https://gitlab.com/redhat/centos-stream/src/kernel/centos-stream-9/-/merge_requests/4190 Rebase the kernel locking code up to v6.8. There aren't significant locking updates since the 9.4 update. The more significant ones are the seqlock and the locktorture test update. The others are mostly miscellaneous updates and cleanups. Signed-off-by: Waiman Long <longman@redhat.com> Approved-by: Phil Auld <pauld@redhat.com> Approved-by: Steve Best <sbest@redhat.com> Approved-by: CKI KWF Bot <cki-ci-bot+kwf-gitlab-com@redhat.com> Merged-by: Scott Weaver <scweaver@redhat.com>
2 parents 1ac6566 + 80b7d9b commit bf357be

File tree

25 files changed

+616
-294
lines changed

25 files changed

+616
-294
lines changed

Documentation/admin-guide/kernel-parameters.txt

Lines changed: 55 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -2901,6 +2901,38 @@
29012901
to extract confidential information from the kernel
29022902
are also disabled.
29032903

2904+
locktorture.acq_writer_lim= [KNL]
2905+
Set the time limit in jiffies for a lock
2906+
acquisition. Acquisitions exceeding this limit
2907+
will result in a splat once they do complete.
2908+
2909+
locktorture.bind_readers= [KNL]
2910+
Specify the list of CPUs to which the readers are
2911+
to be bound.
2912+
2913+
locktorture.bind_writers= [KNL]
2914+
Specify the list of CPUs to which the writers are
2915+
to be bound.
2916+
2917+
locktorture.call_rcu_chains= [KNL]
2918+
Specify the number of self-propagating call_rcu()
2919+
chains to set up. These are used to ensure that
2920+
there is a high probability of an RCU grace period
2921+
in progress at any given time. Defaults to 0,
2922+
which disables these call_rcu() chains.
2923+
2924+
locktorture.long_hold= [KNL]
2925+
Specify the duration in milliseconds for the
2926+
occasional long-duration lock hold time. Defaults
2927+
to 100 milliseconds. Select 0 to disable.
2928+
2929+
locktorture.nested_locks= [KNL]
2930+
Specify the maximum lock nesting depth that
2931+
locktorture is to exercise, up to a limit of 8
2932+
(MAX_NESTED_LOCKS). Specify zero to disable.
2933+
Note that this parameter is ineffective on types
2934+
of locks that do not support nested acquisition.
2935+
29042936
locktorture.nreaders_stress= [KNL]
29052937
Set the number of locking read-acquisition kthreads.
29062938
Defaults to being automatically set based on the
@@ -2916,6 +2948,25 @@
29162948
Set time (s) between CPU-hotplug operations, or
29172949
zero to disable CPU-hotplug testing.
29182950

2951+
locktorture.rt_boost= [KNL]
2952+
Do periodic testing of real-time lock priority
2953+
boosting. Select 0 to disable, 1 to boost
2954+
only rt_mutex, and 2 to boost unconditionally.
2955+
Defaults to 2, which might seem to be an
2956+
odd choice, but which should be harmless for
2957+
non-real-time spinlocks, due to their disabling
2958+
of preemption. Note that non-realtime mutexes
2959+
disable boosting.
2960+
2961+
locktorture.rt_boost_factor= [KNL]
2962+
Number that determines how often and for how
2963+
long priority boosting is exercised. This is
2964+
scaled down by the number of writers, so that the
2965+
number of boosts per unit time remains roughly
2966+
constant as the number of writers increases.
2967+
On the other hand, the duration of each boost
2968+
increases with the number of writers.
2969+
29192970
locktorture.shuffle_interval= [KNL]
29202971
Set task-shuffle interval (jiffies). Shuffling
29212972
tasks allows some CPUs to go into dyntick-idle
@@ -2941,6 +2992,10 @@
29412992
locktorture.verbose= [KNL]
29422993
Enable additional printk() statements.
29432994

2995+
locktorture.writer_fifo= [KNL]
2996+
Run the write-side locktorture kthreads at
2997+
sched_set_fifo() real-time priority.
2998+
29442999
logibm.irq= [HW,MOUSE] Logitech Bus Mouse Driver
29453000
Format: <irq>
29463001

Documentation/locking/mutex-design.rst

Lines changed: 6 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -101,6 +101,12 @@ features that make lock debugging easier and faster:
101101
- Detects multi-task circular deadlocks and prints out all affected
102102
locks and tasks (and only those tasks).
103103

104+
Releasing a mutex is not an atomic operation: Once a mutex release operation
105+
has begun, another context may be able to acquire the mutex before the release
106+
operation has fully completed. The mutex user must ensure that the mutex is not
107+
destroyed while a release operation is still in progress - in other words,
108+
callers of mutex_unlock() must ensure that the mutex stays alive until
109+
mutex_unlock() has returned.
104110

105111
Interfaces
106112
----------

arch/x86/include/asm/sections.h

Lines changed: 0 additions & 18 deletions
Original file line numberDiff line numberDiff line change
@@ -2,8 +2,6 @@
22
#ifndef _ASM_X86_SECTIONS_H
33
#define _ASM_X86_SECTIONS_H
44

5-
#define arch_is_kernel_initmem_freed arch_is_kernel_initmem_freed
6-
75
#include <asm-generic/sections.h>
86
#include <asm/extable.h>
97

@@ -18,20 +16,4 @@ extern char __end_of_kernel_reserve[];
1816

1917
extern unsigned long _brk_start, _brk_end;
2018

21-
static inline bool arch_is_kernel_initmem_freed(unsigned long addr)
22-
{
23-
/*
24-
* If _brk_start has not been cleared, brk allocation is incomplete,
25-
* and we can not make assumptions about its use.
26-
*/
27-
if (_brk_start)
28-
return 0;
29-
30-
/*
31-
* After brk allocation is complete, space between _brk_end and _end
32-
* is available for allocation.
33-
*/
34-
return addr >= _brk_end && addr < (unsigned long)&_end;
35-
}
36-
3719
#endif /* _ASM_X86_SECTIONS_H */

include/asm-generic/sections.h

Lines changed: 10 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -130,18 +130,24 @@ static inline bool init_section_intersects(void *virt, size_t size)
130130

131131
/**
132132
* is_kernel_core_data - checks if the pointer address is located in the
133-
* .data section
133+
* .data or .bss section
134134
*
135135
* @addr: address to check
136136
*
137-
* Returns: true if the address is located in .data, false otherwise.
137+
* Returns: true if the address is located in .data or .bss, false otherwise.
138138
* Note: On some archs it may return true for core RODATA, and false
139139
* for others. But will always be true for core RW data.
140140
*/
141141
static inline bool is_kernel_core_data(unsigned long addr)
142142
{
143-
return addr >= (unsigned long)_sdata &&
144-
addr < (unsigned long)_edata;
143+
if (addr >= (unsigned long)_sdata && addr < (unsigned long)_edata)
144+
return true;
145+
146+
if (addr >= (unsigned long)__bss_start &&
147+
addr < (unsigned long)__bss_stop)
148+
return true;
149+
150+
return false;
145151
}
146152

147153
/**

include/linux/cleanup.h

Lines changed: 49 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -125,25 +125,55 @@ static inline class_##_name##_t class_##_name##ext##_constructor(_init_args) \
125125
* trivial wrapper around DEFINE_CLASS() above specifically
126126
* for locks.
127127
*
128+
* DEFINE_GUARD_COND(name, ext, condlock)
129+
* wrapper around EXTEND_CLASS above to add conditional lock
130+
* variants to a base class, eg. mutex_trylock() or
131+
* mutex_lock_interruptible().
132+
*
128133
* guard(name):
129-
* an anonymous instance of the (guard) class
134+
* an anonymous instance of the (guard) class, not recommended for
135+
* conditional locks.
130136
*
131137
* scoped_guard (name, args...) { }:
132138
* similar to CLASS(name, scope)(args), except the variable (with the
133139
explicit name 'scope') is declared in a for-loop such that its scope is
134140
* bound to the next (compound) statement.
135141
*
142+
* for conditional locks the loop body is skipped when the lock is not
143+
* acquired.
144+
*
145+
* scoped_cond_guard (name, fail, args...) { }:
146+
similar to scoped_guard(), except that it executes the fail
147+
statement when the lock acquisition fails.
148+
*
136149
*/
137150

138151
#define DEFINE_GUARD(_name, _type, _lock, _unlock) \
139-
DEFINE_CLASS(_name, _type, _unlock, ({ _lock; _T; }), _type _T)
152+
DEFINE_CLASS(_name, _type, if (_T) { _unlock; }, ({ _lock; _T; }), _type _T); \
153+
static inline void * class_##_name##_lock_ptr(class_##_name##_t *_T) \
154+
{ return *_T; }
155+
156+
#define DEFINE_GUARD_COND(_name, _ext, _condlock) \
157+
EXTEND_CLASS(_name, _ext, \
158+
({ void *_t = _T; if (_T && !(_condlock)) _t = NULL; _t; }), \
159+
class_##_name##_t _T) \
160+
static inline void * class_##_name##_ext##_lock_ptr(class_##_name##_t *_T) \
161+
{ return class_##_name##_lock_ptr(_T); }
140162

141163
#define guard(_name) \
142164
CLASS(_name, __UNIQUE_ID(guard))
143165

166+
#define __guard_ptr(_name) class_##_name##_lock_ptr
167+
144168
#define scoped_guard(_name, args...) \
145169
for (CLASS(_name, scope)(args), \
146-
*done = NULL; !done; done = (void *)1)
170+
*done = NULL; __guard_ptr(_name)(&scope) && !done; done = (void *)1)
171+
172+
#define scoped_cond_guard(_name, _fail, args...) \
173+
for (CLASS(_name, scope)(args), \
174+
*done = NULL; !done; done = (void *)1) \
175+
if (!__guard_ptr(_name)(&scope)) _fail; \
176+
else
147177

148178
/*
149179
* Additional helper macros for generating lock guards with types, either for
@@ -152,6 +182,7 @@ static inline class_##_name##_t class_##_name##ext##_constructor(_init_args) \
152182
*
153183
* DEFINE_LOCK_GUARD_0(name, lock, unlock, ...)
154184
* DEFINE_LOCK_GUARD_1(name, type, lock, unlock, ...)
185+
* DEFINE_LOCK_GUARD_1_COND(name, ext, condlock)
155186
*
156187
* will result in the following type:
157188
*
@@ -173,6 +204,11 @@ typedef struct { \
173204
static inline void class_##_name##_destructor(class_##_name##_t *_T) \
174205
{ \
175206
if (_T->lock) { _unlock; } \
207+
} \
208+
\
209+
static inline void *class_##_name##_lock_ptr(class_##_name##_t *_T) \
210+
{ \
211+
return _T->lock; \
176212
}
177213

178214

@@ -201,4 +237,14 @@ __DEFINE_LOCK_GUARD_1(_name, _type, _lock)
201237
__DEFINE_UNLOCK_GUARD(_name, void, _unlock, __VA_ARGS__) \
202238
__DEFINE_LOCK_GUARD_0(_name, _lock)
203239

240+
#define DEFINE_LOCK_GUARD_1_COND(_name, _ext, _condlock) \
241+
EXTEND_CLASS(_name, _ext, \
242+
({ class_##_name##_t _t = { .lock = l }, *_T = &_t;\
243+
if (_T->lock && !(_condlock)) _T->lock = NULL; \
244+
_t; }), \
245+
typeof_member(class_##_name##_t, lock) l) \
246+
static inline void * class_##_name##_ext##_lock_ptr(class_##_name##_t *_T) \
247+
{ return class_##_name##_lock_ptr(_T); }
248+
249+
204250
#endif /* __LINUX_GUARDS_H */

include/linux/lockdep.h

Lines changed: 8 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -447,6 +447,14 @@ extern int lockdep_is_held(const void *);
447447

448448
#endif /* !LOCKDEP */
449449

450+
#ifdef CONFIG_PROVE_LOCKING
451+
void lockdep_set_lock_cmp_fn(struct lockdep_map *, lock_cmp_fn, lock_print_fn);
452+
453+
#define lock_set_cmp_fn(lock, ...) lockdep_set_lock_cmp_fn(&(lock)->dep_map, __VA_ARGS__)
454+
#else
455+
#define lock_set_cmp_fn(lock, ...) do { } while (0)
456+
#endif
457+
450458
enum xhlock_context_t {
451459
XHLOCK_HARD,
452460
XHLOCK_SOFT,

include/linux/lockdep_types.h

Lines changed: 8 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -85,6 +85,11 @@ struct lock_trace;
8585

8686
#define LOCKSTAT_POINTS 4
8787

88+
struct lockdep_map;
89+
typedef int (*lock_cmp_fn)(const struct lockdep_map *a,
90+
const struct lockdep_map *b);
91+
typedef void (*lock_print_fn)(const struct lockdep_map *map);
92+
8893
/*
8994
* The lock-class itself. The order of the structure members matters.
9095
* reinit_class() zeroes the key member and all subsequent members.
@@ -110,6 +115,9 @@ struct lock_class {
110115
struct list_head locks_after, locks_before;
111116

112117
const struct lockdep_subclass_key *key;
118+
lock_cmp_fn cmp_fn;
119+
lock_print_fn print_fn;
120+
113121
unsigned int subclass;
114122
unsigned int dep_gen_id;
115123

include/linux/mutex.h

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -221,6 +221,7 @@ extern void mutex_unlock(struct mutex *lock);
221221
extern int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock);
222222

223223
DEFINE_GUARD(mutex, struct mutex *, mutex_lock(_T), mutex_unlock(_T))
224-
DEFINE_FREE(mutex, struct mutex *, if (_T) mutex_unlock(_T))
224+
DEFINE_GUARD_COND(mutex, _try, mutex_trylock(_T))
225+
DEFINE_GUARD_COND(mutex, _intr, mutex_lock_interruptible(_T) == 0)
225226

226227
#endif /* __LINUX_MUTEX_H */

include/linux/osq_lock.h

Lines changed: 0 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -6,11 +6,6 @@
66
* An MCS like lock especially tailored for optimistic spinning for sleeping
77
* lock implementations (mutex, rwsem, etc).
88
*/
9-
struct optimistic_spin_node {
10-
struct optimistic_spin_node *next, *prev;
11-
int locked; /* 1 if lock acquired */
12-
int cpu; /* encoded CPU # + 1 value */
13-
};
149

1510
struct optimistic_spin_queue {
1611
/*

include/linux/rwsem.h

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -203,11 +203,11 @@ extern void up_read(struct rw_semaphore *sem);
203203
extern void up_write(struct rw_semaphore *sem);
204204

205205
DEFINE_GUARD(rwsem_read, struct rw_semaphore *, down_read(_T), up_read(_T))
206-
DEFINE_GUARD(rwsem_write, struct rw_semaphore *, down_write(_T), up_write(_T))
207-
208-
DEFINE_FREE(up_read, struct rw_semaphore *, if (_T) up_read(_T))
209-
DEFINE_FREE(up_write, struct rw_semaphore *, if (_T) up_write(_T))
206+
DEFINE_GUARD_COND(rwsem_read, _try, down_read_trylock(_T))
207+
DEFINE_GUARD_COND(rwsem_read, _intr, down_read_interruptible(_T) == 0)
210208

209+
DEFINE_GUARD(rwsem_write, struct rw_semaphore *, down_write(_T), up_write(_T))
210+
DEFINE_GUARD_COND(rwsem_write, _try, down_write_trylock(_T))
211211

212212
/*
213213
* downgrade write lock to read lock

0 commit comments

Comments
 (0)