@@ -51,20 +51,23 @@ enum work_bits {
 	 * data contains off-queue information when !WORK_STRUCT_PWQ.
 	 *
 	 * MSB
-	 * [ pool ID ] [ OFFQ flags ] [ STRUCT flags ]
-	 *                 1 bit        4 or 5 bits
+	 * [ pool ID ] [ disable depth ] [ OFFQ flags ] [ STRUCT flags ]
+	 *                  16 bits          1 bit        4 or 5 bits
 	 */
 	WORK_OFFQ_FLAG_SHIFT	= WORK_STRUCT_FLAG_BITS,
-	WORK_OFFQ_CANCELING_BIT	= WORK_OFFQ_FLAG_SHIFT,
+	WORK_OFFQ_BH_BIT	= WORK_OFFQ_FLAG_SHIFT,
 	WORK_OFFQ_FLAG_END,
 	WORK_OFFQ_FLAG_BITS	= WORK_OFFQ_FLAG_END - WORK_OFFQ_FLAG_SHIFT,
 
+	WORK_OFFQ_DISABLE_SHIFT	= WORK_OFFQ_FLAG_SHIFT + WORK_OFFQ_FLAG_BITS,
+	WORK_OFFQ_DISABLE_BITS	= 16,
+
 	/*
 	 * When a work item is off queue, the high bits encode off-queue flags
 	 * and the last pool it was on. Cap pool ID to 31 bits and use the
 	 * highest number to indicate that no pool is associated.
 	 */
-	WORK_OFFQ_POOL_SHIFT	= WORK_OFFQ_FLAG_SHIFT + WORK_OFFQ_FLAG_BITS,
+	WORK_OFFQ_POOL_SHIFT	= WORK_OFFQ_DISABLE_SHIFT + WORK_OFFQ_DISABLE_BITS,
 	WORK_OFFQ_LEFT		= BITS_PER_LONG - WORK_OFFQ_POOL_SHIFT,
 	WORK_OFFQ_POOL_BITS	= WORK_OFFQ_LEFT <= 31 ? WORK_OFFQ_LEFT : 31,
 };
@@ -96,7 +99,9 @@ enum wq_misc_consts {
 };
 
 /* Convenience constants - of type 'unsigned long', not 'enum'! */
-#define WORK_OFFQ_CANCELING	(1ul << WORK_OFFQ_CANCELING_BIT)
+#define WORK_OFFQ_BH		(1ul << WORK_OFFQ_BH_BIT)
+#define WORK_OFFQ_FLAG_MASK	(((1ul << WORK_OFFQ_FLAG_BITS) - 1) << WORK_OFFQ_FLAG_SHIFT)
+#define WORK_OFFQ_DISABLE_MASK	(((1ul << WORK_OFFQ_DISABLE_BITS) - 1) << WORK_OFFQ_DISABLE_SHIFT)
 #define WORK_OFFQ_POOL_NONE	((1ul << WORK_OFFQ_POOL_BITS) - 1)
 #define WORK_STRUCT_NO_POOL	(WORK_OFFQ_POOL_NONE << WORK_OFFQ_POOL_SHIFT)
 #define WORK_STRUCT_PWQ_MASK	(~((1ul << WORK_STRUCT_PWQ_SHIFT) - 1))
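For orientation, the following sketch (illustrative only, not part of the patch) shows how the off-queue value described by the layout comment above could be unpacked with the new constants; the helper names are hypothetical:

/* Hypothetical helpers -- assume @data is work->data with WORK_STRUCT_PWQ clear. */
static inline unsigned long work_offq_disable_depth(unsigned long data)
{
	return (data & WORK_OFFQ_DISABLE_MASK) >> WORK_OFFQ_DISABLE_SHIFT;
}

static inline unsigned long work_offq_pool_id(unsigned long data)
{
	return data >> WORK_OFFQ_POOL_SHIFT;	/* WORK_OFFQ_POOL_NONE if no pool */
}

static inline bool work_offq_is_bh(unsigned long data)
{
	return data & WORK_OFFQ_BH;
}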
@@ -180,6 +185,9 @@ struct workqueue_attrs {
 	 * Below fields aren't properties of a worker_pool. They only modify how
 	 * :c:func:`apply_workqueue_attrs` select pools and thus don't
 	 * participate in pool hash calculations or equality comparisons.
+	 *
+	 * If @affn_strict is set, @cpumask isn't a property of a worker_pool
+	 * either.
 	 */
 
 	/**
@@ -465,7 +473,7 @@ void workqueue_softirq_dead(unsigned int cpu);
  * @fmt: printf format for the name of the workqueue
  * @flags: WQ_* flags
  * @max_active: max in-flight work items, 0 for default
- * remaining args: args for @fmt
+ * @...: args for @fmt
  *
  * For a per-cpu workqueue, @max_active limits the number of in-flight work
  * items for each CPU. e.g. @max_active of 1 indicates that each CPU can be
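As a usage note for the kernel-doc above: the trailing arguments are ordinary printf-style varargs for @fmt, and @max_active of 0 selects the default. A minimal, hypothetical caller (the driver and workqueue names are made up) might look like:

static struct workqueue_struct *mydrv_create_wq(int instance)
{
	/* "mydrv_wq_%d" and instance are illustrative; 0 means default max_active. */
	return alloc_workqueue("mydrv_wq_%d", WQ_UNBOUND, 0, instance);
}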
@@ -559,6 +567,14 @@ extern bool flush_delayed_work(struct delayed_work *dwork);
 extern bool cancel_delayed_work(struct delayed_work *dwork);
 extern bool cancel_delayed_work_sync(struct delayed_work *dwork);
 
+extern bool disable_work(struct work_struct *work);
+extern bool disable_work_sync(struct work_struct *work);
+extern bool enable_work(struct work_struct *work);
+
+extern bool disable_delayed_work(struct delayed_work *dwork);
+extern bool disable_delayed_work_sync(struct delayed_work *dwork);
+extern bool enable_delayed_work(struct delayed_work *dwork);
+
 extern bool flush_rcu_work(struct rcu_work *rwork);
 
 extern void workqueue_set_max_active(struct workqueue_struct *wq,
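A possible pairing of the new declarations, as a sketch only (the driver type and field names are hypothetical), assuming disable_work_sync() cancels the item, waits for any running instance, and blocks further queueing until a matching enable_work():

static void mydrv_quiesce(struct mydrv *drv)
{
	/* After this returns, drv->event_work is neither running nor queueable. */
	disable_work_sync(&drv->event_work);
}

static void mydrv_unquiesce(struct mydrv *drv)
{
	/* Drops one level of disable depth; queueing resumes at depth zero. */
	enable_work(&drv->event_work);
}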
@@ -666,6 +682,32 @@ static inline bool schedule_work(struct work_struct *work)
 	return queue_work(system_wq, work);
 }
 
+/**
+ * enable_and_queue_work - Enable and queue a work item on a specific workqueue
+ * @wq: The target workqueue
+ * @work: The work item to be enabled and queued
+ *
+ * This function combines the operations of enable_work() and queue_work(),
+ * providing a convenient way to enable and queue a work item in a single call.
+ * It invokes enable_work() on @work and then queues it if the disable depth
+ * reached 0. Returns %true if the disable depth reached 0 and @work is queued,
+ * and %false otherwise.
+ *
+ * Note that @work is always queued when disable depth reaches zero. If the
+ * desired behavior is queueing only if certain events took place while @work is
+ * disabled, the user should implement the necessary state tracking and perform
+ * explicit conditional queueing after enable_work().
+ */
+static inline bool enable_and_queue_work(struct workqueue_struct *wq,
+					 struct work_struct *work)
+{
+	if (enable_work(work)) {
+		queue_work(wq, work);
+		return true;
+	}
+	return false;
+}
+
 /*
  * Detect attempt to flush system-wide workqueues at compile time when possible.
  * Warn attempt to flush system-wide workqueues at runtime.
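The conditional-queueing alternative mentioned in the enable_and_queue_work() comment could look like the sketch below; the device structure, its lock, and the event flag are hypothetical:

struct mydev {
	struct work_struct work;
	spinlock_t lock;
	bool event_pending;		/* set by the event path while work is disabled */
};

static void mydev_enable_events(struct mydev *dev)
{
	bool queue = false;

	if (enable_work(&dev->work)) {
		spin_lock_irq(&dev->lock);
		queue = dev->event_pending;
		dev->event_pending = false;
		spin_unlock_irq(&dev->lock);
	}

	/* Queue only if something happened while the work item was disabled. */
	if (queue)
		queue_work(system_wq, &dev->work);
}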