@@ -1081,6 +1081,25 @@ static void ublk_complete_rq(struct kref *ref)
 	__ublk_complete_rq(req);
 }
 
+static void ublk_do_fail_rq(struct request *req)
+{
+	struct ublk_queue *ubq = req->mq_hctx->driver_data;
+
+	if (ublk_nosrv_should_reissue_outstanding(ubq->dev))
+		blk_mq_requeue_request(req, false);
+	else
+		__ublk_complete_rq(req);
+}
+
+static void ublk_fail_rq_fn(struct kref *ref)
+{
+	struct ublk_rq_data *data = container_of(ref, struct ublk_rq_data,
+						 ref);
+	struct request *req = blk_mq_rq_from_pdu(data);
+
+	ublk_do_fail_rq(req);
+}
+
 /*
  * Since __ublk_rq_task_work always fails requests immediately during
  * exiting, __ublk_fail_req() is only called from abort context during
@@ -1094,10 +1113,13 @@ static void __ublk_fail_req(struct ublk_queue *ubq, struct ublk_io *io,
 {
 	WARN_ON_ONCE(io->flags & UBLK_IO_FLAG_ACTIVE);
 
-	if (ublk_nosrv_should_reissue_outstanding(ubq->dev))
-		blk_mq_requeue_request(req, false);
-	else
-		ublk_put_req_ref(ubq, req);
+	if (ublk_need_req_ref(ubq)) {
+		struct ublk_rq_data *data = blk_mq_rq_to_pdu(req);
+
+		kref_put(&data->ref, ublk_fail_rq_fn);
+	} else {
+		ublk_do_fail_rq(req);
+	}
 }
 
 static void ubq_complete_io_cmd(struct ublk_io *io, int res,
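
The point of routing the failure path through kref_put() is that when the queue uses request references (ublk_need_req_ref()), another code path may still hold a reference to the request, so __ublk_fail_req() must not requeue or complete it directly; ublk_fail_rq_fn() runs only when the final reference is dropped. Below is a minimal userspace sketch, not part of the patch, illustrating the release-on-last-put pattern that kref_put() implements; fake_ref, fake_req, fake_ref_put and fail_rq_fn are hypothetical stand-ins for the ublk structures and kernel kref API used above.

/* Illustrative userspace analogue only; not kernel code. */
#include <stdatomic.h>
#include <stddef.h>
#include <stdio.h>

struct fake_ref {
	atomic_int refcount;
};

struct fake_req {
	struct fake_ref ref;	/* embedded refcount, like ublk_rq_data.ref */
	int tag;
};

/*
 * Analogue of kref_put(): drop one reference and invoke the release
 * callback only when the count reaches zero.
 */
static void fake_ref_put(struct fake_ref *ref,
			 void (*release)(struct fake_ref *))
{
	/* atomic_fetch_sub() returns the old value; 1 means we dropped last. */
	if (atomic_fetch_sub(&ref->refcount, 1) == 1)
		release(ref);
}

/* Analogue of ublk_fail_rq_fn(): recover the container and fail it. */
static void fail_rq_fn(struct fake_ref *ref)
{
	/* container_of() idiom: walk back from the embedded member. */
	struct fake_req *req = (struct fake_req *)
		((char *)ref - offsetof(struct fake_req, ref));

	printf("failing request %d on final put\n", req->tag);
}

int main(void)
{
	struct fake_req req = { .ref = { .refcount = 2 }, .tag = 7 };

	fake_ref_put(&req.ref, fail_rq_fn);	/* still referenced: no-op */
	fake_ref_put(&req.ref, fail_rq_fn);	/* last put: callback runs */
	return 0;
}

Compiled with any C11 compiler (e.g. cc -std=c11), only the second put prints the failure line, mirroring how ublk_fail_rq_fn() fires exactly once, when data->ref drops to zero, regardless of which holder performs the final put.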