@@ -47,10 +47,53 @@ static struct fuse_ring_ent *uring_cmd_to_ring_ent(struct io_uring_cmd *cmd)
         return pdu->ent;
 }
 
+static void fuse_uring_flush_bg(struct fuse_ring_queue *queue)
+{
+        struct fuse_ring *ring = queue->ring;
+        struct fuse_conn *fc = ring->fc;
+
+        lockdep_assert_held(&queue->lock);
+        lockdep_assert_held(&fc->bg_lock);
+
+        /*
+         * Allow one bg request per queue, ignoring global fc limits.
+         * This prevents a single queue from consuming all resources and
+         * eliminates the need for remote queue wake-ups when global
+         * limits are met but this queue has no more waiting requests.
+         */
+        while ((fc->active_background < fc->max_background ||
+                !queue->active_background) &&
+               (!list_empty(&queue->fuse_req_bg_queue))) {
+                struct fuse_req *req;
+
+                req = list_first_entry(&queue->fuse_req_bg_queue,
+                                       struct fuse_req, list);
+                fc->active_background++;
+                queue->active_background++;
+
+                list_move_tail(&req->list, &queue->fuse_req_queue);
+        }
+}
+
 static void fuse_uring_req_end(struct fuse_ring_ent *ent, struct fuse_req *req,
                                int error)
 {
+        struct fuse_ring_queue *queue = ent->queue;
+        struct fuse_ring *ring = queue->ring;
+        struct fuse_conn *fc = ring->fc;
+
+        lockdep_assert_not_held(&queue->lock);
+        spin_lock(&queue->lock);
         ent->fuse_req = NULL;
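+        /*
+         * A completed bg request frees a slot on this queue; flush the
+         * per-queue bg list so the next waiting bg request (if any) can
+         * move over to the regular request queue.
+         */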
+        if (test_bit(FR_BACKGROUND, &req->flags)) {
+                queue->active_background--;
+                spin_lock(&fc->bg_lock);
+                fuse_uring_flush_bg(queue);
+                spin_unlock(&fc->bg_lock);
+        }
+
+        spin_unlock(&queue->lock);
+
         if (error)
                 req->out.h.error = error;
 
@@ -78,13 +121,21 @@ void fuse_uring_abort_end_requests(struct fuse_ring *ring)
 {
         int qid;
         struct fuse_ring_queue *queue;
+        struct fuse_conn *fc = ring->fc;
 
         for (qid = 0; qid < ring->nr_queues; qid++) {
                 queue = READ_ONCE(ring->queues[qid]);
                 if (!queue)
                         continue;
 
                 queue->stopped = true;
+
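+                /*
+                 * The abort path is expected to have lifted the global limit
+                 * (max_background == UINT_MAX), so this flush drains every bg
+                 * request still parked on this queue.
+                 */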
+                WARN_ON_ONCE(ring->fc->max_background != UINT_MAX);
+                spin_lock(&queue->lock);
+                spin_lock(&fc->bg_lock);
+                fuse_uring_flush_bg(queue);
+                spin_unlock(&fc->bg_lock);
+                spin_unlock(&queue->lock);
                 fuse_uring_abort_end_queue_requests(queue);
         }
 }
@@ -190,6 +241,7 @@ static struct fuse_ring_queue *fuse_uring_create_queue(struct fuse_ring *ring,
         INIT_LIST_HEAD(&queue->ent_w_req_queue);
         INIT_LIST_HEAD(&queue->ent_in_userspace);
         INIT_LIST_HEAD(&queue->fuse_req_queue);
+        INIT_LIST_HEAD(&queue->fuse_req_bg_queue);
 
         queue->fpq.processing = pq;
         fuse_pqueue_init(&queue->fpq);
@@ -1141,6 +1193,53 @@ void fuse_uring_queue_fuse_req(struct fuse_iqueue *fiq, struct fuse_req *req)
         fuse_request_end(req);
 }
 
+bool fuse_uring_queue_bq_req(struct fuse_req *req)
+{
+        struct fuse_conn *fc = req->fm->fc;
+        struct fuse_ring *ring = fc->ring;
+        struct fuse_ring_queue *queue;
+        struct fuse_ring_ent *ent = NULL;
+
+        queue = fuse_uring_task_to_queue(ring);
+        if (!queue)
+                return false;
+
+        spin_lock(&queue->lock);
+        if (unlikely(queue->stopped)) {
+                spin_unlock(&queue->lock);
+                return false;
+        }
+
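+        /*
+         * Park the request on the per-queue bg list first;
+         * fuse_uring_flush_bg() below decides whether it (or an earlier
+         * queued bg request) may proceed to the regular request queue.
+         */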
+        list_add_tail(&req->list, &queue->fuse_req_bg_queue);
+
+        ent = list_first_entry_or_null(&queue->ent_avail_queue,
+                                       struct fuse_ring_ent, list);
+        spin_lock(&fc->bg_lock);
+        fc->num_background++;
+        if (fc->num_background == fc->max_background)
+                fc->blocked = 1;
+        fuse_uring_flush_bg(queue);
+        spin_unlock(&fc->bg_lock);
+
+        /*
+         * Due to bg_queue flush limits there might be other bg requests
+         * in the queue that need to be handled first. Or no further req
+         * might be available.
+         */
+        req = list_first_entry_or_null(&queue->fuse_req_queue, struct fuse_req,
+                                       list);
+        if (ent && req) {
+                fuse_uring_add_req_to_ring_ent(ent, req);
+                spin_unlock(&queue->lock);
+
+                fuse_uring_dispatch_ent(ent);
+        } else {
+                spin_unlock(&queue->lock);
+        }
+
+        return true;
+}
+
 static const struct fuse_iqueue_ops fuse_io_uring_ops = {
         /* should be send over io-uring as enhancement */
         .send_forget = fuse_dev_queue_forget,
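
Not part of these hunks: the caller side. One plausible wiring inside fuse_request_queue_background() in dev.c, sketched under the assumption that a fuse_uring_ready()-style helper reports whether the ring is usable; the helper name, the #ifdef guard and the exact placement are assumptions for illustration, not taken from this diff:

#ifdef CONFIG_FUSE_IO_URING
        /*
         * Sketch only: if io-uring communication is up, hand the bg request
         * to the ring. fuse_uring_queue_bq_req() returns false when no
         * usable queue exists or the queue is stopped.
         */
        if (fuse_uring_ready(fc))
                return fuse_uring_queue_bq_req(req);
#endif
        /* ... classic fiq-based background queuing continues here ... */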