@@ -1292,14 +1292,12 @@ static void nvme_queue_keep_alive_work(struct nvme_ctrl *ctrl)
 	queue_delayed_work(nvme_wq, &ctrl->ka_work, delay);
 }
 
-static enum rq_end_io_ret nvme_keep_alive_end_io(struct request *rq,
-						 blk_status_t status)
+static void nvme_keep_alive_finish(struct request *rq,
+		blk_status_t status, struct nvme_ctrl *ctrl)
 {
-	struct nvme_ctrl *ctrl = rq->end_io_data;
-	unsigned long flags;
-	bool startka = false;
 	unsigned long rtt = jiffies - (rq->deadline - rq->timeout);
 	unsigned long delay = nvme_keep_alive_work_period(ctrl);
+	enum nvme_ctrl_state state = nvme_ctrl_state(ctrl);
 
 	/*
 	 * Subtract off the keepalive RTT so nvme_keep_alive_work runs
@@ -1313,25 +1311,17 @@ static enum rq_end_io_ret nvme_keep_alive_end_io(struct request *rq,
 		delay = 0;
 	}
 
-	blk_mq_free_request(rq);
-
 	if (status) {
 		dev_err(ctrl->device,
 			"failed nvme_keep_alive_end_io error=%d\n",
 			status);
-		return RQ_END_IO_NONE;
+		return;
 	}
 
 	ctrl->ka_last_check_time = jiffies;
 	ctrl->comp_seen = false;
-	spin_lock_irqsave(&ctrl->lock, flags);
-	if (ctrl->state == NVME_CTRL_LIVE ||
-	    ctrl->state == NVME_CTRL_CONNECTING)
-		startka = true;
-	spin_unlock_irqrestore(&ctrl->lock, flags);
-	if (startka)
+	if (state == NVME_CTRL_LIVE || state == NVME_CTRL_CONNECTING)
 		queue_delayed_work(nvme_wq, &ctrl->ka_work, delay);
-	return RQ_END_IO_NONE;
 }
 
 static void nvme_keep_alive_work(struct work_struct *work)
@@ -1340,6 +1330,7 @@ static void nvme_keep_alive_work(struct work_struct *work)
 			struct nvme_ctrl, ka_work);
 	bool comp_seen = ctrl->comp_seen;
 	struct request *rq;
+	blk_status_t status;
 
 	ctrl->ka_last_check_time = jiffies;
 
@@ -1362,9 +1353,9 @@ static void nvme_keep_alive_work(struct work_struct *work)
 	nvme_init_request(rq, &ctrl->ka_cmd);
 
 	rq->timeout = ctrl->kato * HZ;
-	rq->end_io = nvme_keep_alive_end_io;
-	rq->end_io_data = ctrl;
-	blk_execute_rq_nowait(rq, false);
+	status = blk_execute_rq(rq, false);
+	nvme_keep_alive_finish(rq, status, ctrl);
+	blk_mq_free_request(rq);
 }
 
 static void nvme_start_keep_alive(struct nvme_ctrl *ctrl)
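Note on the keep-alive hunks above: the completion logic moves out of an
end_io callback (which received the controller via rq->end_io_data) into
nvme_keep_alive_finish(), called from the submitting work context after a
synchronous blk_execute_rq(), and the request is now freed by the caller
rather than by the callback. The spin_lock_irqsave()-protected
LIVE/CONNECTING check can become lockless because the new code snapshots
the state once via nvme_ctrl_state(). A minimal sketch of that helper,
assuming the usual definition in drivers/nvme/host/nvme.h:

static inline enum nvme_ctrl_state nvme_ctrl_state(struct nvme_ctrl *ctrl)
{
	/* lockless snapshot of the controller state */
	return READ_ONCE(ctrl->state);
}

Reading the state into a local variable also means the LIVE/CONNECTING test
sees one consistent value instead of two racing loads of ctrl->state.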
@@ -2458,8 +2449,13 @@ int nvme_enable_ctrl(struct nvme_ctrl *ctrl)
 	else
 		ctrl->ctrl_config = NVME_CC_CSS_NVM;
 
-	if (ctrl->cap & NVME_CAP_CRMS_CRWMS && ctrl->cap & NVME_CAP_CRMS_CRIMS)
-		ctrl->ctrl_config |= NVME_CC_CRIME;
+	/*
+	 * Setting CRIME results in CSTS.RDY before the media is ready. This
+	 * makes it possible for media related commands to return the error
+	 * NVME_SC_ADMIN_COMMAND_MEDIA_NOT_READY. Until the driver is
+	 * restructured to handle retries, disable CC.CRIME.
+	 */
+	ctrl->ctrl_config &= ~NVME_CC_CRIME;
 
 	ctrl->ctrl_config |= (NVME_CTRL_PAGE_SHIFT - 12) << NVME_CC_MPS_SHIFT;
 	ctrl->ctrl_config |= NVME_CC_AMS_RR | NVME_CC_SHN_NONE;
@@ -2489,10 +2485,7 @@ int nvme_enable_ctrl(struct nvme_ctrl *ctrl)
 	 * devices are known to get this wrong. Use the larger of the
 	 * two values.
 	 */
-	if (ctrl->ctrl_config & NVME_CC_CRIME)
-		ready_timeout = NVME_CRTO_CRIMT(crto);
-	else
-		ready_timeout = NVME_CRTO_CRWMT(crto);
+	ready_timeout = NVME_CRTO_CRWMT(crto);
 
 	if (ready_timeout < timeout)
 		dev_warn_once(ctrl->device, "bad crto:%x cap:%llx\n",
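Note on the CC.CRIME hunks: with CRIME forced off, the controller only
reports CSTS.RDY once the media is ready, so the ready-with-media timeout
is the only CRTO field that still applies and the CRIMT/CRWMT selection
collapses to NVME_CRTO_CRWMT() unconditionally. For orientation, a sketch
of the two field accessors, assuming the conventional include/linux/nvme.h
encoding of CRIMT in the upper and CRWMT in the lower 16 bits of CRTO:

/* restated for illustration; verify against include/linux/nvme.h */
#define NVME_CRTO_CRIMT(crto)	((crto) >> 16)		/* ready independent of media */
#define NVME_CRTO_CRWMT(crto)	((crto) & 0xffff)	/* ready with media */

Both CRTO timeouts are specified in 500 ms units, the same granularity as
CAP.TO, which is why ready_timeout can be compared directly against the
CAP-derived timeout in the warning above.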