@@ -1534,28 +1534,6 @@ static struct bio *raid10_split_bio(struct r10conf *conf,
15341534 return bio ;
15351535}
15361536
1537- static void raid_end_discard_bio (struct r10bio * r10bio )
1538- {
1539- struct r10conf * conf = r10bio -> mddev -> private ;
1540- struct r10bio * first_r10bio ;
1541-
1542- while (atomic_dec_and_test (& r10bio -> remaining )) {
1543-
1544- allow_barrier (conf );
1545-
1546- if (!test_bit (R10BIO_Discard , & r10bio -> state )) {
1547- first_r10bio = (struct r10bio * )r10bio -> master_bio ;
1548- free_r10bio (r10bio );
1549- r10bio = first_r10bio ;
1550- } else {
1551- md_write_end (r10bio -> mddev );
1552- bio_endio (r10bio -> master_bio );
1553- free_r10bio (r10bio );
1554- break ;
1555- }
1556- }
1557- }
1558-
15591537static void raid10_end_discard_request (struct bio * bio )
15601538{
15611539 struct r10bio * r10_bio = bio -> bi_private ;
@@ -1582,7 +1560,11 @@ static void raid10_end_discard_request(struct bio *bio)
15821560 rdev = conf -> mirrors [dev ].rdev ;
15831561 }
15841562
1585- raid_end_discard_bio (r10_bio );
1563+ if (atomic_dec_and_test (& r10_bio -> remaining )) {
1564+ md_write_end (r10_bio -> mddev );
1565+ raid_end_bio_io (r10_bio );
1566+ }
1567+
15861568 rdev_dec_pending (rdev , conf -> mddev );
15871569}
15881570
@@ -1595,9 +1577,7 @@ static int raid10_handle_discard(struct mddev *mddev, struct bio *bio)
15951577{
15961578 struct r10conf * conf = mddev -> private ;
15971579 struct geom * geo = & conf -> geo ;
1598- struct r10bio * r10_bio , * first_r10bio ;
1599- int far_copies = geo -> far_copies ;
1600- bool first_copy = true;
1580+ struct r10bio * r10_bio ;
16011581
16021582 int disk ;
16031583 sector_t chunk ;
@@ -1636,20 +1616,30 @@ static int raid10_handle_discard(struct mddev *mddev, struct bio *bio)
16361616 if (bio_sectors (bio ) < stripe_size * 2 )
16371617 goto out ;
16381618
1639- /* For far and far offset layout, if bio is not aligned with stripe size,
1640- * it splits the part that is not aligned with strip size.
1619+ /* For far offset layout, if bio is not aligned with stripe size, it splits
1620+ * the part that is not aligned with strip size.
16411621 */
16421622 div_u64_rem (bio_start , stripe_size , & remainder );
1643- if (( far_copies > 1 ) && remainder ) {
1623+ if (geo -> far_offset && remainder ) {
16441624 split_size = stripe_size - remainder ;
16451625 bio = raid10_split_bio (conf , bio , split_size , false);
16461626 }
16471627 div_u64_rem (bio_end , stripe_size , & remainder );
1648- if (( far_copies > 1 ) && remainder ) {
1628+ if (geo -> far_offset && remainder ) {
16491629 split_size = bio_sectors (bio ) - remainder ;
16501630 bio = raid10_split_bio (conf , bio , split_size , true);
16511631 }
16521632
1633+ r10_bio = mempool_alloc (& conf -> r10bio_pool , GFP_NOIO );
1634+ r10_bio -> mddev = mddev ;
1635+ r10_bio -> state = 0 ;
1636+ r10_bio -> sectors = 0 ;
1637+ memset (r10_bio -> devs , 0 , sizeof (r10_bio -> devs [0 ]) * geo -> raid_disks );
1638+
1639+ wait_blocked_dev (mddev , r10_bio );
1640+
1641+ r10_bio -> master_bio = bio ;
1642+
16531643 bio_start = bio -> bi_iter .bi_sector ;
16541644 bio_end = bio_end_sector (bio );
16551645
@@ -1675,28 +1665,6 @@ static int raid10_handle_discard(struct mddev *mddev, struct bio *bio)
16751665 end_disk_offset = (bio_end & geo -> chunk_mask ) +
16761666 (last_stripe_index << geo -> chunk_shift );
16771667
1678- retry_discard :
1679- r10_bio = mempool_alloc (& conf -> r10bio_pool , GFP_NOIO );
1680- r10_bio -> mddev = mddev ;
1681- r10_bio -> state = 0 ;
1682- r10_bio -> sectors = 0 ;
1683- memset (r10_bio -> devs , 0 , sizeof (r10_bio -> devs [0 ]) * geo -> raid_disks );
1684- wait_blocked_dev (mddev , r10_bio );
1685-
1686- /* For far layout it needs more than one r10bio to cover all regions.
1687- * Inspired by raid10_sync_request, we can use the first r10bio->master_bio
1688- * to record the discard bio. Other r10bio->master_bio record the first
1689- * r10bio. The first r10bio only release after all other r10bios finish.
1690- * The discard bio returns only first r10bio finishes
1691- */
1692- if (first_copy ) {
1693- r10_bio -> master_bio = bio ;
1694- set_bit (R10BIO_Discard , & r10_bio -> state );
1695- first_copy = false;
1696- first_r10bio = r10_bio ;
1697- } else
1698- r10_bio -> master_bio = (struct bio * )first_r10bio ;
1699-
17001668 rcu_read_lock ();
17011669 for (disk = 0 ; disk < geo -> raid_disks ; disk ++ ) {
17021670 struct md_rdev * rdev = rcu_dereference (conf -> mirrors [disk ].rdev );
@@ -1787,19 +1755,11 @@ static int raid10_handle_discard(struct mddev *mddev, struct bio *bio)
17871755 }
17881756 }
17891757
1790- if (!geo -> far_offset && -- far_copies ) {
1791- first_stripe_index += geo -> stride >> geo -> chunk_shift ;
1792- start_disk_offset += geo -> stride ;
1793- last_stripe_index += geo -> stride >> geo -> chunk_shift ;
1794- end_disk_offset += geo -> stride ;
1795- atomic_inc (& first_r10bio -> remaining );
1796- raid_end_discard_bio (r10_bio );
1797- wait_barrier (conf );
1798- goto retry_discard ;
1758+ if (atomic_dec_and_test (& r10_bio -> remaining )) {
1759+ md_write_end (r10_bio -> mddev );
1760+ raid_end_bio_io (r10_bio );
17991761 }
18001762
1801- raid_end_discard_bio (r10_bio );
1802-
18031763 return 0 ;
18041764out :
18051765 allow_barrier (conf );