Skip to content

Commit cb8372e

Browse files
ps-ushankar and gregkh
authored and committed
ublk: refactor recovery configuration flag helpers
[ Upstream commit 3b939b8 ]

ublk currently supports the following behaviors on ublk server exit:

A: outstanding I/Os get errors, subsequently issued I/Os get errors
B: outstanding I/Os get errors, subsequently issued I/Os queue
C: outstanding I/Os get reissued, subsequently issued I/Os queue

and the following behaviors for recovery of preexisting block devices by
a future incarnation of the ublk server:

1: ublk devices stopped on ublk server exit (no recovery possible)
2: ublk devices are recoverable using start/end_recovery commands

The userspace interface allows selection of combinations of these
behaviors using flags specified at device creation time, namely:

default behavior: A + 1
UBLK_F_USER_RECOVERY: B + 2
UBLK_F_USER_RECOVERY|UBLK_F_USER_RECOVERY_REISSUE: C + 2

We can't easily change the userspace interface to allow independent
selection of one of {A, B, C} and one of {1, 2}, but we can refactor the
internal helpers which test for the flags. Replace the existing helpers
with the following set:

ublk_nosrv_should_reissue_outstanding: tests for behavior C
ublk_nosrv_[dev_]should_queue_io: tests for behavior B
ublk_nosrv_should_stop_dev: tests for behavior 1

Signed-off-by: Uday Shankar <ushankar@purestorage.com>
Reviewed-by: Ming Lei <ming.lei@redhat.com>
Link: https://lore.kernel.org/r/20241007182419.3263186-3-ushankar@purestorage.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Stable-dep-of: 6ee6bd5 ("ublk: fix handling recovery & reissue in ublk_abort_queue()")
Signed-off-by: Sasha Levin <sashal@kernel.org>
1 parent 206d0df commit cb8372e

File tree

1 file changed

+42
-20
lines changed

1 file changed

+42
-20
lines changed

drivers/block/ublk_drv.c

Lines changed: 42 additions & 20 deletions
Original file line numberDiff line numberDiff line change
@@ -681,22 +681,44 @@ static int ublk_max_cmd_buf_size(void)
681681
return __ublk_queue_cmd_buf_size(UBLK_MAX_QUEUE_DEPTH);
682682
}
683683

684-
static inline bool ublk_queue_can_use_recovery_reissue(
685-
struct ublk_queue *ubq)
684+
/*
685+
* Should I/O outstanding to the ublk server when it exits be reissued?
686+
* If not, outstanding I/O will get errors.
687+
*/
688+
static inline bool ublk_nosrv_should_reissue_outstanding(struct ublk_device *ub)
686689
{
687-
return (ubq->flags & UBLK_F_USER_RECOVERY) &&
688-
(ubq->flags & UBLK_F_USER_RECOVERY_REISSUE);
690+
return (ub->dev_info.flags & UBLK_F_USER_RECOVERY) &&
691+
(ub->dev_info.flags & UBLK_F_USER_RECOVERY_REISSUE);
689692
}
690693

691-
static inline bool ublk_queue_can_use_recovery(
692-
struct ublk_queue *ubq)
694+
/*
695+
* Should I/O issued while there is no ublk server queue? If not, I/O
696+
* issued while there is no ublk server will get errors.
697+
*/
698+
static inline bool ublk_nosrv_dev_should_queue_io(struct ublk_device *ub)
699+
{
700+
return ub->dev_info.flags & UBLK_F_USER_RECOVERY;
701+
}
702+
703+
/*
704+
* Same as ublk_nosrv_dev_should_queue_io, but uses a queue-local copy
705+
* of the device flags for smaller cache footprint - better for fast
706+
* paths.
707+
*/
708+
static inline bool ublk_nosrv_should_queue_io(struct ublk_queue *ubq)
693709
{
694710
return ubq->flags & UBLK_F_USER_RECOVERY;
695711
}
696712

697-
static inline bool ublk_can_use_recovery(struct ublk_device *ub)
713+
/*
714+
* Should ublk devices be stopped (i.e. no recovery possible) when the
715+
* ublk server exits? If not, devices can be used again by a future
716+
* incarnation of a ublk server via the start_recovery/end_recovery
717+
* commands.
718+
*/
719+
static inline bool ublk_nosrv_should_stop_dev(struct ublk_device *ub)
698720
{
699-
return ub->dev_info.flags & UBLK_F_USER_RECOVERY;
721+
return !(ub->dev_info.flags & UBLK_F_USER_RECOVERY);
700722
}
701723

702724
static void ublk_free_disk(struct gendisk *disk)
@@ -1072,7 +1094,7 @@ static void __ublk_fail_req(struct ublk_queue *ubq, struct ublk_io *io,
10721094
{
10731095
WARN_ON_ONCE(io->flags & UBLK_IO_FLAG_ACTIVE);
10741096

1075-
if (ublk_queue_can_use_recovery_reissue(ubq))
1097+
if (ublk_nosrv_should_reissue_outstanding(ubq->dev))
10761098
blk_mq_requeue_request(req, false);
10771099
else
10781100
ublk_put_req_ref(ubq, req);
@@ -1100,7 +1122,7 @@ static inline void __ublk_abort_rq(struct ublk_queue *ubq,
11001122
struct request *rq)
11011123
{
11021124
/* We cannot process this rq so just requeue it. */
1103-
if (ublk_queue_can_use_recovery(ubq))
1125+
if (ublk_nosrv_dev_should_queue_io(ubq->dev))
11041126
blk_mq_requeue_request(rq, false);
11051127
else
11061128
blk_mq_end_request(rq, BLK_STS_IOERR);
@@ -1245,10 +1267,10 @@ static enum blk_eh_timer_return ublk_timeout(struct request *rq)
12451267
struct ublk_device *ub = ubq->dev;
12461268

12471269
if (ublk_abort_requests(ub, ubq)) {
1248-
if (ublk_can_use_recovery(ub))
1249-
schedule_work(&ub->quiesce_work);
1250-
else
1270+
if (ublk_nosrv_should_stop_dev(ub))
12511271
schedule_work(&ub->stop_work);
1272+
else
1273+
schedule_work(&ub->quiesce_work);
12521274
}
12531275
return BLK_EH_DONE;
12541276
}
@@ -1277,7 +1299,7 @@ static blk_status_t ublk_queue_rq(struct blk_mq_hw_ctx *hctx,
12771299
* Note: force_abort is guaranteed to be seen because it is set
12781300
* before request queue is unqiuesced.
12791301
*/
1280-
if (ublk_queue_can_use_recovery(ubq) && unlikely(ubq->force_abort))
1302+
if (ublk_nosrv_should_queue_io(ubq) && unlikely(ubq->force_abort))
12811303
return BLK_STS_IOERR;
12821304

12831305
if (unlikely(ubq->canceling)) {
@@ -1517,10 +1539,10 @@ static void ublk_uring_cmd_cancel_fn(struct io_uring_cmd *cmd,
15171539
ublk_cancel_cmd(ubq, io, issue_flags);
15181540

15191541
if (need_schedule) {
1520-
if (ublk_can_use_recovery(ub))
1521-
schedule_work(&ub->quiesce_work);
1522-
else
1542+
if (ublk_nosrv_should_stop_dev(ub))
15231543
schedule_work(&ub->stop_work);
1544+
else
1545+
schedule_work(&ub->quiesce_work);
15241546
}
15251547
}
15261548

@@ -1640,7 +1662,7 @@ static void ublk_stop_dev(struct ublk_device *ub)
16401662
mutex_lock(&ub->mutex);
16411663
if (ub->dev_info.state == UBLK_S_DEV_DEAD)
16421664
goto unlock;
1643-
if (ublk_can_use_recovery(ub)) {
1665+
if (ublk_nosrv_dev_should_queue_io(ub)) {
16441666
if (ub->dev_info.state == UBLK_S_DEV_LIVE)
16451667
__ublk_quiesce_dev(ub);
16461668
ublk_unquiesce_dev(ub);
@@ -2738,7 +2760,7 @@ static int ublk_ctrl_start_recovery(struct ublk_device *ub,
27382760
int i;
27392761

27402762
mutex_lock(&ub->mutex);
2741-
if (!ublk_can_use_recovery(ub))
2763+
if (ublk_nosrv_should_stop_dev(ub))
27422764
goto out_unlock;
27432765
if (!ub->nr_queues_ready)
27442766
goto out_unlock;
@@ -2791,7 +2813,7 @@ static int ublk_ctrl_end_recovery(struct ublk_device *ub,
27912813
__func__, ub->dev_info.nr_hw_queues, header->dev_id);
27922814

27932815
mutex_lock(&ub->mutex);
2794-
if (!ublk_can_use_recovery(ub))
2816+
if (ublk_nosrv_should_stop_dev(ub))
27952817
goto out_unlock;
27962818

27972819
if (ub->dev_info.state != UBLK_S_DEV_QUIESCED) {

0 commit comments

Comments
 (0)