@@ -1046,15 +1046,14 @@ iomap_file_buffered_write(struct kiocb *iocb, struct iov_iter *i,
 }
 EXPORT_SYMBOL_GPL(iomap_file_buffered_write);
 
-static int iomap_write_delalloc_ifs_punch(struct inode *inode,
+static void iomap_write_delalloc_ifs_punch(struct inode *inode,
 		struct folio *folio, loff_t start_byte, loff_t end_byte,
 		struct iomap *iomap, iomap_punch_t punch)
 {
 	unsigned int first_blk, last_blk, i;
 	loff_t last_byte;
 	u8 blkbits = inode->i_blkbits;
 	struct iomap_folio_state *ifs;
-	int ret = 0;
 
 	/*
 	 * When we have per-block dirty tracking, there can be
@@ -1064,56 +1063,42 @@ static int iomap_write_delalloc_ifs_punch(struct inode *inode,
 	 */
 	ifs = folio->private;
 	if (!ifs)
-		return ret;
+		return;
 
 	last_byte = min_t(loff_t, end_byte - 1,
 			folio_pos(folio) + folio_size(folio) - 1);
 	first_blk = offset_in_folio(folio, start_byte) >> blkbits;
 	last_blk = offset_in_folio(folio, last_byte) >> blkbits;
 	for (i = first_blk; i <= last_blk; i++) {
-		if (!ifs_block_is_dirty(folio, ifs, i)) {
-			ret = punch(inode, folio_pos(folio) + (i << blkbits),
+		if (!ifs_block_is_dirty(folio, ifs, i))
+			punch(inode, folio_pos(folio) + (i << blkbits),
 					1 << blkbits, iomap);
-			if (ret)
-				return ret;
-		}
 	}
-
-	return ret;
 }
 
-
-static int iomap_write_delalloc_punch(struct inode *inode, struct folio *folio,
+static void iomap_write_delalloc_punch(struct inode *inode, struct folio *folio,
 		loff_t *punch_start_byte, loff_t start_byte, loff_t end_byte,
 		struct iomap *iomap, iomap_punch_t punch)
 {
-	int ret = 0;
-
 	if (!folio_test_dirty(folio))
-		return ret;
+		return;
 
 	/* if dirty, punch up to offset */
 	if (start_byte > *punch_start_byte) {
-		ret = punch(inode, *punch_start_byte,
-				start_byte - *punch_start_byte, iomap);
-		if (ret)
-			return ret;
+		punch(inode, *punch_start_byte, start_byte - *punch_start_byte,
+				iomap);
 	}
 
 	/* Punch non-dirty blocks within folio */
-	ret = iomap_write_delalloc_ifs_punch(inode, folio, start_byte, end_byte,
+	iomap_write_delalloc_ifs_punch(inode, folio, start_byte, end_byte,
 			iomap, punch);
-	if (ret)
-		return ret;
 
 	/*
 	 * Make sure the next punch start is correctly bound to
 	 * the end of this data range, not the end of the folio.
 	 */
 	*punch_start_byte = min_t(loff_t, end_byte,
 				folio_pos(folio) + folio_size(folio));
-
-	return ret;
 }
 
 /*
@@ -1133,13 +1118,12 @@ static int iomap_write_delalloc_punch(struct inode *inode, struct folio *folio,
  * This function uses [start_byte, end_byte) intervals (i.e. open ended) to
  * simplify range iterations.
  */
-static int iomap_write_delalloc_scan(struct inode *inode,
+static void iomap_write_delalloc_scan(struct inode *inode,
 		loff_t *punch_start_byte, loff_t start_byte, loff_t end_byte,
 		struct iomap *iomap, iomap_punch_t punch)
 {
 	while (start_byte < end_byte) {
 		struct folio *folio;
-		int ret;
 
 		/* grab locked page */
 		folio = filemap_lock_folio(inode->i_mapping,
@@ -1150,20 +1134,14 @@ static int iomap_write_delalloc_scan(struct inode *inode,
 			continue;
 		}
 
-		ret = iomap_write_delalloc_punch(inode, folio, punch_start_byte,
+		iomap_write_delalloc_punch(inode, folio, punch_start_byte,
 				start_byte, end_byte, iomap, punch);
-		if (ret) {
-			folio_unlock(folio);
-			folio_put(folio);
-			return ret;
-		}
 
 		/* move offset to start of next folio in range */
 		start_byte = folio_next_index(folio) << PAGE_SHIFT;
 		folio_unlock(folio);
 		folio_put(folio);
 	}
-	return 0;
 }
 
 /*
@@ -1199,13 +1177,12 @@ static int iomap_write_delalloc_scan(struct inode *inode,
  * require sprinkling this code with magic "+ 1" and "- 1" arithmetic and expose
  * the code to subtle off-by-one bugs....
  */
-static int iomap_write_delalloc_release(struct inode *inode, loff_t start_byte,
+static void iomap_write_delalloc_release(struct inode *inode, loff_t start_byte,
 		loff_t end_byte, unsigned flags, struct iomap *iomap,
 		iomap_punch_t punch)
 {
 	loff_t punch_start_byte = start_byte;
 	loff_t scan_end_byte = min(i_size_read(inode), end_byte);
-	int error = 0;
 
 	/*
 	 * Lock the mapping to avoid races with page faults re-instantiating
@@ -1222,13 +1199,15 @@ static int iomap_write_delalloc_release(struct inode *inode, loff_t start_byte,
 		/*
 		 * If there is no more data to scan, all that is left is to
 		 * punch out the remaining range.
+		 *
+		 * Note that mapping_seek_hole_data is only supposed to return
+		 * either an offset or -ENXIO, so WARN on any other error as
+		 * that would be an API change without updating the callers.
 		 */
 		if (start_byte == -ENXIO || start_byte == scan_end_byte)
 			break;
-		if (start_byte < 0) {
-			error = start_byte;
+		if (WARN_ON_ONCE(start_byte < 0))
 			goto out_unlock;
-		}
 		WARN_ON_ONCE(start_byte < punch_start_byte);
 		WARN_ON_ONCE(start_byte > scan_end_byte);
 
@@ -1238,10 +1217,8 @@ static int iomap_write_delalloc_release(struct inode *inode, loff_t start_byte,
 		 */
 		data_end = mapping_seek_hole_data(inode->i_mapping, start_byte,
 				scan_end_byte, SEEK_HOLE);
-		if (data_end < 0) {
-			error = data_end;
+		if (WARN_ON_ONCE(data_end < 0))
 			goto out_unlock;
-		}
 
 		/*
 		 * If we race with post-direct I/O invalidation of the page cache,
@@ -1253,21 +1230,18 @@ static int iomap_write_delalloc_release(struct inode *inode, loff_t start_byte,
 		WARN_ON_ONCE(data_end < start_byte);
 		WARN_ON_ONCE(data_end > scan_end_byte);
 
-		error = iomap_write_delalloc_scan(inode, &punch_start_byte,
-				start_byte, data_end, iomap, punch);
-		if (error)
-			goto out_unlock;
+		iomap_write_delalloc_scan(inode, &punch_start_byte, start_byte,
+				data_end, iomap, punch);
 
 		/* The next data search starts at the end of this one. */
 		start_byte = data_end;
 	}
 
 	if (punch_start_byte < end_byte)
-		error = punch(inode, punch_start_byte,
-				end_byte - punch_start_byte, iomap);
+		punch(inode, punch_start_byte, end_byte - punch_start_byte,
+				iomap);
 out_unlock:
 	filemap_invalidate_unlock(inode->i_mapping);
-	return error;
 }
 
 /*
@@ -1300,7 +1274,7 @@ static int iomap_write_delalloc_release(struct inode *inode, loff_t start_byte,
  *       ->punch
  *         internal filesystem allocation lock
  */
-int iomap_file_buffered_write_punch_delalloc(struct inode *inode,
+void iomap_file_buffered_write_punch_delalloc(struct inode *inode,
 		loff_t pos, loff_t length, ssize_t written, unsigned flags,
 		struct iomap *iomap, iomap_punch_t punch)
 {
@@ -1309,11 +1283,11 @@ int iomap_file_buffered_write_punch_delalloc(struct inode *inode,
 	unsigned int		blocksize = i_blocksize(inode);
 
 	if (iomap->type != IOMAP_DELALLOC)
-		return 0;
+		return;
 
 	/* If we didn't reserve the blocks, we're not allowed to punch them. */
 	if (!(iomap->flags & IOMAP_F_NEW))
-		return 0;
+		return;
 
 	/*
 	 * start_byte refers to the first unused block after a short write. If
@@ -1328,10 +1302,10 @@ int iomap_file_buffered_write_punch_delalloc(struct inode *inode,
 
 	/* Nothing to do if we've written the entire delalloc extent */
 	if (start_byte >= end_byte)
-		return 0;
+		return;
 
-	return iomap_write_delalloc_release(inode, start_byte, end_byte, flags,
-			iomap, punch);
+	iomap_write_delalloc_release(inode, start_byte, end_byte, flags, iomap,
+			punch);
 }
 EXPORT_SYMBOL_GPL(iomap_file_buffered_write_punch_delalloc);
 