@@ -150,6 +150,7 @@ void fuse_uring_destruct(struct fuse_conn *fc)
150150
151151 for (qid = 0 ; qid < ring -> nr_queues ; qid ++ ) {
152152 struct fuse_ring_queue * queue = ring -> queues [qid ];
153+ struct fuse_ring_ent * ent , * next ;
153154
154155 if (!queue )
155156 continue ;
@@ -159,6 +160,12 @@ void fuse_uring_destruct(struct fuse_conn *fc)
159160 WARN_ON (!list_empty (& queue -> ent_commit_queue ));
160161 WARN_ON (!list_empty (& queue -> ent_in_userspace ));
161162
163+ list_for_each_entry_safe (ent , next , & queue -> ent_released ,
164+ list ) {
165+ list_del_init (& ent -> list );
166+ kfree (ent );
167+ }
168+
162169 kfree (queue -> fpq .processing );
163170 kfree (queue );
164171 ring -> queues [qid ] = NULL ;
@@ -242,6 +249,7 @@ static struct fuse_ring_queue *fuse_uring_create_queue(struct fuse_ring *ring,
242249 INIT_LIST_HEAD (& queue -> ent_in_userspace );
243250 INIT_LIST_HEAD (& queue -> fuse_req_queue );
244251 INIT_LIST_HEAD (& queue -> fuse_req_bg_queue );
252+ INIT_LIST_HEAD (& queue -> ent_released );
245253
246254 queue -> fpq .processing = pq ;
247255 fuse_pqueue_init (& queue -> fpq );
@@ -289,16 +297,22 @@ static void fuse_uring_entry_teardown(struct fuse_ring_ent *ent)
289297 /* remove entry from queue->fpq->processing */
290298 list_del_init (& req -> list );
291299 }
300+
301+ /*
302+	 * The entry must not be freed immediately, because it is accessed
303+	 * through a direct pointer on IO_URING_F_CANCEL - there is a risk of
304+	 * a race with daemon termination, which triggers IO_URING_F_CANCEL
305+	 * and accesses entries without checking the list state first.
306+ */
307+ list_move (& ent -> list , & queue -> ent_released );
308+ ent -> state = FRRS_RELEASED ;
292309 spin_unlock (& queue -> lock );
293310
294311 if (cmd )
295312 io_uring_cmd_done (cmd , - ENOTCONN , 0 , IO_URING_F_UNLOCKED );
296313
297314 if (req )
298315 fuse_uring_stop_fuse_req_end (req );
299-
300- list_del_init (& ent -> list );
301- kfree (ent );
302316}
303317
304318static void fuse_uring_stop_list_entries (struct list_head * head ,
@@ -318,6 +332,7 @@ static void fuse_uring_stop_list_entries(struct list_head *head,
318332 continue ;
319333 }
320334
335+ ent -> state = FRRS_TEARDOWN ;
321336 list_move (& ent -> list , & to_teardown );
322337 }
323338 spin_unlock (& queue -> lock );
@@ -432,6 +447,46 @@ void fuse_uring_stop_queues(struct fuse_ring *ring)
432447 }
433448}
434449
450+ /*
451+ * Handle IO_URING_F_CANCEL, typically should come on daemon termination.
452+ *
453+ * Releasing the last entry should trigger fuse_dev_release() if
454+ * the daemon was terminated
455+ */
456+ static void fuse_uring_cancel (struct io_uring_cmd * cmd ,
457+ unsigned int issue_flags )
458+ {
459+ struct fuse_ring_ent * ent = uring_cmd_to_ring_ent (cmd );
460+ struct fuse_ring_queue * queue ;
461+ bool need_cmd_done = false;
462+
463+ /*
464+ * direct access on ent - it must not be destructed as long as
465+ * IO_URING_F_CANCEL might come up
466+ */
467+ queue = ent -> queue ;
468+ spin_lock (& queue -> lock );
469+ if (ent -> state == FRRS_AVAILABLE ) {
470+ ent -> state = FRRS_USERSPACE ;
471+ list_move (& ent -> list , & queue -> ent_in_userspace );
472+ need_cmd_done = true;
473+ ent -> cmd = NULL ;
474+ }
475+ spin_unlock (& queue -> lock );
476+
477+ if (need_cmd_done ) {
478+ /* no queue lock to avoid lock order issues */
479+ io_uring_cmd_done (cmd , - ENOTCONN , 0 , issue_flags );
480+ }
481+ }
482+
/*
 * Remember the entry on the cmd and mark the cmd cancelable, so that a
 * later IO_URING_F_CANCEL can be routed back to this entry.
 */
static void fuse_uring_prepare_cancel(struct io_uring_cmd *cmd, int issue_flags,
				      struct fuse_ring_ent *ring_ent)
{
	uring_cmd_set_ring_ent(cmd, ring_ent);
	io_uring_cmd_mark_cancelable(cmd, issue_flags);
}
489+
435490/*
436491 * Checks for errors and stores it into the request
437492 */
@@ -839,6 +894,7 @@ static int fuse_uring_commit_fetch(struct io_uring_cmd *cmd, int issue_flags,
839894 spin_unlock (& queue -> lock );
840895
841896 /* without the queue lock, as other locks are taken */
897+ fuse_uring_prepare_cancel (cmd , issue_flags , ent );
842898 fuse_uring_commit (ent , req , issue_flags );
843899
844900 /*
@@ -888,6 +944,8 @@ static void fuse_uring_do_register(struct fuse_ring_ent *ent,
888944 struct fuse_conn * fc = ring -> fc ;
889945 struct fuse_iqueue * fiq = & fc -> iq ;
890946
947+ fuse_uring_prepare_cancel (cmd , issue_flags , ent );
948+
891949 spin_lock (& queue -> lock );
892950 ent -> cmd = cmd ;
893951 fuse_uring_ent_avail (ent , queue );
@@ -1038,6 +1096,11 @@ int __maybe_unused fuse_uring_cmd(struct io_uring_cmd *cmd,
10381096 return - EOPNOTSUPP ;
10391097 }
10401098
1099+ if ((unlikely (issue_flags & IO_URING_F_CANCEL ))) {
1100+ fuse_uring_cancel (cmd , issue_flags );
1101+ return 0 ;
1102+ }
1103+
10411104 /* This extra SQE size holds struct fuse_uring_cmd_req */
10421105 if (!(issue_flags & IO_URING_F_SQE128 ))
10431106 return - EINVAL ;
0 commit comments