@@ -1309,8 +1309,9 @@ static size_t dm_dax_recovery_write(struct dax_device *dax_dev, pgoff_t pgoff,
13091309/*
13101310 * A target may call dm_accept_partial_bio only from the map routine. It is
13111311 * allowed for all bio types except REQ_PREFLUSH, REQ_OP_ZONE_* zone management
1312- * operations, REQ_OP_ZONE_APPEND (zone append writes) and any bio serviced by
1313- * __send_duplicate_bios().
1312+ * operations, zone append writes (native with REQ_OP_ZONE_APPEND or emulated
1313+ * with write BIOs flagged with BIO_EMULATES_ZONE_APPEND) and any bio serviced
1314+ * by __send_duplicate_bios().
13141315 *
13151316 * dm_accept_partial_bio informs the dm that the target only wants to process
13161317 * additional n_sectors sectors of the bio and the rest of the data should be
@@ -1343,11 +1344,19 @@ void dm_accept_partial_bio(struct bio *bio, unsigned int n_sectors)
13431344 unsigned int bio_sectors = bio_sectors (bio );
13441345
13451346 BUG_ON (dm_tio_flagged (tio , DM_TIO_IS_DUPLICATE_BIO ));
1346- BUG_ON (op_is_zone_mgmt (bio_op (bio )));
1347- BUG_ON (bio_op (bio ) == REQ_OP_ZONE_APPEND );
13481347 BUG_ON (bio_sectors > * tio -> len_ptr );
13491348 BUG_ON (n_sectors > bio_sectors );
13501349
1350+ if (static_branch_unlikely (& zoned_enabled ) &&
1351+ unlikely (bdev_is_zoned (bio -> bi_bdev ))) {
1352+ enum req_op op = bio_op (bio );
1353+
1354+ BUG_ON (op_is_zone_mgmt (op ));
1355+ BUG_ON (op == REQ_OP_WRITE );
1356+ BUG_ON (op == REQ_OP_WRITE_ZEROES );
1357+ BUG_ON (op == REQ_OP_ZONE_APPEND );
1358+ }
1359+
13511360 * tio -> len_ptr -= bio_sectors - n_sectors ;
13521361 bio -> bi_iter .bi_size = n_sectors << SECTOR_SHIFT ;
13531362
@@ -1792,19 +1801,35 @@ static void init_clone_info(struct clone_info *ci, struct dm_io *io,
17921801}
17931802
17941803#ifdef CONFIG_BLK_DEV_ZONED
1795- static inline bool dm_zone_bio_needs_split (struct mapped_device * md ,
1796- struct bio * bio )
1804+ static inline bool dm_zone_bio_needs_split (struct bio * bio )
17971805{
17981806 /*
1799- * For mapped device that need zone append emulation, we must
1800- * split any large BIO that straddles zone boundaries.
1807+ * Special case the zone operations that cannot or should not be split.
18011808 */
1802- return dm_emulate_zone_append (md ) && bio_straddles_zones (bio ) &&
1803- !bio_flagged (bio , BIO_ZONE_WRITE_PLUGGING );
1809+ switch (bio_op (bio )) {
1810+ case REQ_OP_ZONE_APPEND :
1811+ case REQ_OP_ZONE_FINISH :
1812+ case REQ_OP_ZONE_RESET :
1813+ case REQ_OP_ZONE_RESET_ALL :
1814+ return false;
1815+ default :
1816+ break ;
1817+ }
1818+
1819+ /*
1820+ * When mapped devices use the block layer zone write plugging, we must
1821+ * split any large BIO to the mapped device limits to not submit BIOs
1822+ * that span zone boundaries and to avoid potential deadlocks with
1823+ * queue freeze operations.
1824+ */
1825+ return bio_needs_zone_write_plugging (bio ) || bio_straddles_zones (bio );
18041826}
1827+
18051828static inline bool dm_zone_plug_bio (struct mapped_device * md , struct bio * bio )
18061829{
1807- return dm_emulate_zone_append (md ) && blk_zone_plug_bio (bio , 0 );
1830+ if (!bio_needs_zone_write_plugging (bio ))
1831+ return false;
1832+ return blk_zone_plug_bio (bio , 0 );
18081833}
18091834
18101835static blk_status_t __send_zone_reset_all_emulated (struct clone_info * ci ,
@@ -1920,8 +1945,7 @@ static blk_status_t __send_zone_reset_all(struct clone_info *ci)
19201945}
19211946
19221947#else
1923- static inline bool dm_zone_bio_needs_split (struct mapped_device * md ,
1924- struct bio * bio )
1948+ static inline bool dm_zone_bio_needs_split (struct bio * bio )
19251949{
19261950 return false;
19271951}
@@ -1948,9 +1972,7 @@ static void dm_split_and_process_bio(struct mapped_device *md,
19481972
19491973 is_abnormal = is_abnormal_io (bio );
19501974 if (static_branch_unlikely (& zoned_enabled )) {
1951- /* Special case REQ_OP_ZONE_RESET_ALL as it cannot be split. */
1952- need_split = (bio_op (bio ) != REQ_OP_ZONE_RESET_ALL ) &&
1953- (is_abnormal || dm_zone_bio_needs_split (md , bio ));
1975+ need_split = is_abnormal || dm_zone_bio_needs_split (bio );
19541976 } else {
19551977 need_split = is_abnormal ;
19561978 }