
Commit 51ea66c

xfs: use byte ranges for write cleanup ranges
Bugzilla: https://bugzilla.redhat.com/show_bug.cgi?id=2155605
Tested: With xfstests and bz reproducer

xfs_buffered_write_iomap_end() currently converts the byte ranges
passed to it to filesystem blocks to pass them to the bmap code to
punch out delalloc blocks, but then has to convert filesystem blocks
back to byte ranges for page cache truncate.

We're about to make the page cache truncate go away and replace it
with a page cache walk, so having to convert everything to/from/to
filesystem blocks is messy and error-prone. It is much easier to pass
around byte ranges and convert to page indexes and/or filesystem
blocks only where those units are needed.

In preparation for the page cache walk being added, add a helper that
converts byte ranges to filesystem blocks and calls
xfs_bmap_punch_delalloc_range(), and convert
xfs_buffered_write_iomap_end() to calculate limits in byte ranges.

Signed-off-by: Dave Chinner <dchinner@redhat.com>
Reviewed-by: Darrick J. Wong <djwong@kernel.org>
(cherry picked from commit b71f889)
Signed-off-by: Carlos Maiolino <cmaiolino@redhat.com>
1 parent: 0ff4cae
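The conversions the commit message talks about differ only in rounding direction, which is easy to see in isolation. Below is a minimal userspace sketch, not part of the patch: byte_to_fsb_trunc() and byte_to_fsb_roundup() are illustrative stand-ins for the XFS_B_TO_FSBT() and XFS_B_TO_FSB() macros, and blksz stands in for mp->m_sb.sb_blocksize (a power of two on XFS).

/*
 * Standalone userspace sketch (not kernel code) of the two block
 * conversions named in the commit message: a truncating conversion
 * in the style of XFS_B_TO_FSBT() and a rounding-up conversion in
 * the style of XFS_B_TO_FSB(). blksz must be a power of two.
 */
#include <stdio.h>
#include <stdint.h>

/* Block containing the byte: truncate, like XFS_B_TO_FSBT(). */
static uint64_t byte_to_fsb_trunc(uint64_t byte, uint64_t blksz)
{
        return byte / blksz;
}

/* First block boundary at or after the byte: round up, like XFS_B_TO_FSB(). */
static uint64_t byte_to_fsb_roundup(uint64_t byte, uint64_t blksz)
{
        return (byte + blksz - 1) / blksz;
}

int main(void)
{
        uint64_t blksz = 4096;
        uint64_t offset = 10000;        /* a mid-block byte offset */

        /* Byte-range equivalents used by the patched code. */
        uint64_t start_byte = offset & ~(blksz - 1);                    /* round_down() */
        uint64_t end_byte = (offset + blksz - 1) & ~(blksz - 1);        /* round_up() */

        printf("truncating:  byte %llu -> fsb %llu\n",
               (unsigned long long)offset,
               (unsigned long long)byte_to_fsb_trunc(offset, blksz));
        printf("rounding up: byte %llu -> fsb %llu\n",
               (unsigned long long)offset,
               (unsigned long long)byte_to_fsb_roundup(offset, blksz));
        printf("byte range:  [%llu, %llu)\n",
               (unsigned long long)start_byte,
               (unsigned long long)end_byte);
        return 0;
}

With a 4096-byte block, byte 10000 truncates to block 2 but rounds up to block 3; that asymmetry is exactly what the byte-range version makes explicit with round_down()/round_up().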


fs/xfs/xfs_iomap.c

Lines changed: 25 additions & 15 deletions
@@ -1075,6 +1075,20 @@ xfs_buffered_write_iomap_begin(
 	return error;
 }
 
+static int
+xfs_buffered_write_delalloc_punch(
+	struct inode		*inode,
+	loff_t			start_byte,
+	loff_t			end_byte)
+{
+	struct xfs_mount	*mp = XFS_M(inode->i_sb);
+	xfs_fileoff_t		start_fsb = XFS_B_TO_FSBT(mp, start_byte);
+	xfs_fileoff_t		end_fsb = XFS_B_TO_FSB(mp, end_byte);
+
+	return xfs_bmap_punch_delalloc_range(XFS_I(inode), start_fsb,
+				end_fsb - start_fsb);
+}
+
 static int
 xfs_buffered_write_iomap_end(
 	struct inode		*inode,
@@ -1084,10 +1098,9 @@ xfs_buffered_write_iomap_end(
 	unsigned		flags,
 	struct iomap		*iomap)
 {
-	struct xfs_inode	*ip = XFS_I(inode);
-	struct xfs_mount	*mp = ip->i_mount;
-	xfs_fileoff_t		start_fsb;
-	xfs_fileoff_t		end_fsb;
+	struct xfs_mount	*mp = XFS_M(inode->i_sb);
+	loff_t			start_byte;
+	loff_t			end_byte;
 	int			error = 0;
 
 	if (iomap->type != IOMAP_DELALLOC)
@@ -1112,13 +1125,13 @@ xfs_buffered_write_iomap_end(
 	 * the range.
 	 */
 	if (unlikely(!written))
-		start_fsb = XFS_B_TO_FSBT(mp, offset);
+		start_byte = round_down(offset, mp->m_sb.sb_blocksize);
 	else
-		start_fsb = XFS_B_TO_FSB(mp, offset + written);
-	end_fsb = XFS_B_TO_FSB(mp, offset + length);
+		start_byte = round_up(offset + written, mp->m_sb.sb_blocksize);
+	end_byte = round_up(offset + length, mp->m_sb.sb_blocksize);
 
 	/* Nothing to do if we've written the entire delalloc extent */
-	if (start_fsb >= end_fsb)
+	if (start_byte >= end_byte)
 		return 0;
 
 	/*
@@ -1128,15 +1141,12 @@ xfs_buffered_write_iomap_end(
 	 * leave dirty pages with no space reservation in the cache.
 	 */
 	filemap_invalidate_lock(inode->i_mapping);
-	truncate_pagecache_range(VFS_I(ip), XFS_FSB_TO_B(mp, start_fsb),
-				 XFS_FSB_TO_B(mp, end_fsb) - 1);
-
-	error = xfs_bmap_punch_delalloc_range(ip, start_fsb,
-			end_fsb - start_fsb);
+	truncate_pagecache_range(inode, start_byte, end_byte - 1);
+	error = xfs_buffered_write_delalloc_punch(inode, start_byte, end_byte);
 	filemap_invalidate_unlock(inode->i_mapping);
 	if (error && !xfs_is_shutdown(mp)) {
-		xfs_alert(mp, "%s: unable to clean up ino %lld",
-			__func__, ip->i_ino);
+		xfs_alert(mp, "%s: unable to clean up ino 0x%llx",
+			__func__, XFS_I(inode)->i_ino);
 		return error;
 	}
 	return 0;
