@@ -152,7 +152,7 @@ static void __end_buffer_read_notouch(struct buffer_head *bh, int uptodate)
 
 /*
  * Default synchronous end-of-IO handler.. Just mark it up-to-date and
- * unlock the buffer. This is what ll_rw_block uses too.
+ * unlock the buffer.
  */
 void end_buffer_read_sync(struct buffer_head *bh, int uptodate)
 {
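
For reference, a hedged sketch of this handler's in-tree shape (the body is untouched by this commit and reconstructed here from fs/buffer.c, so treat the exact lines as an assumption): it records the IO result via the helper named in the hunk header, then drops the submitter's reference.

void end_buffer_read_sync(struct buffer_head *bh, int uptodate)
{
	/* set or clear BH_Uptodate to match the IO result, then unlock */
	__end_buffer_read_notouch(bh, uptodate);
	/* drop the reference the submitter took with get_bh() */
	put_bh(bh);
}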
@@ -491,8 +491,8 @@ int inode_has_buffers(struct inode *inode)
  * all already-submitted IO to complete, but does not queue any new
  * writes to the disk.
  *
- * To do O_SYNC writes, just queue the buffer writes with ll_rw_block as
- * you dirty the buffers, and then use osync_inode_buffers to wait for
+ * To do O_SYNC writes, just queue the buffer writes with write_dirty_buffer
+ * as you dirty the buffers, and then use osync_inode_buffers to wait for
  * completion. Any other dirty buffers which are not yet queued for
  * write will not be flushed to disk by the osync.
  */
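
A minimal sketch of the two-step pattern this comment describes, assuming a hypothetical caller named osync_write_buffers (osync_inode_buffers is the waiter the comment itself names; error handling elided):

static int osync_write_buffers(struct inode *inode,
			       struct buffer_head *bhs[], int nr)
{
	int i;

	for (i = 0; i < nr; i++) {
		mark_buffer_dirty(bhs[i]);
		/* queue each write as the buffer is dirtied */
		write_dirty_buffer(bhs[i], REQ_SYNC);
	}
	/* wait for already-submitted IO; this queues nothing new */
	return osync_inode_buffers(inode);
}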
@@ -1806,7 +1806,7 @@ int __block_write_full_page(struct inode *inode, struct page *page,
 		/*
 		 * The page was marked dirty, but the buffers were
 		 * clean. Someone wrote them back by hand with
-		 * ll_rw_block/submit_bh. A rare case.
+		 * write_dirty_buffer/submit_bh. A rare case.
 		 */
 		end_page_writeback(page);
 
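
One hypothetical sequence that produces this dirty-page/clean-buffer state, using only calls that appear elsewhere in this diff:

	mark_buffer_dirty(bh);		/* page and buffer both dirty */
	write_dirty_buffer(bh, 0);	/* clears BH_Dirty and submits the
					   write; the page dirty flag stays */
	/* a later writeback pass then sees a dirty page whose buffers
	   are all clean, which is the rare case handled above */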
@@ -3040,61 +3040,6 @@ int submit_bh(blk_opf_t opf, struct buffer_head *bh)
 }
 EXPORT_SYMBOL(submit_bh);
 
-/**
- * ll_rw_block: low-level access to block devices (DEPRECATED)
- * @opf: block layer request operation and flags.
- * @nr: number of &struct buffer_heads in the array
- * @bhs: array of pointers to &struct buffer_head
- *
- * ll_rw_block() takes an array of pointers to &struct buffer_heads, and
- * requests an I/O operation on them, either a %REQ_OP_READ or a %REQ_OP_WRITE.
- * @opf contains flags modifying the detailed I/O behavior, most notably
- * %REQ_RAHEAD.
- *
- * This function drops any buffer that it cannot get a lock on (with the
- * BH_Lock state bit), any buffer that appears to be clean when doing a write
- * request, and any buffer that appears to be up-to-date when doing read
- * request. Further it marks as clean buffers that are processed for
- * writing (the buffer cache won't assume that they are actually clean
- * until the buffer gets unlocked).
- *
- * ll_rw_block sets b_end_io to simple completion handler that marks
- * the buffer up-to-date (if appropriate), unlocks the buffer and wakes
- * any waiters.
- *
- * All of the buffers must be for the same device, and must also be a
- * multiple of the current approved size for the device.
- */
-void ll_rw_block(const blk_opf_t opf, int nr, struct buffer_head *bhs[])
-{
-	const enum req_op op = opf & REQ_OP_MASK;
-	int i;
-
-	for (i = 0; i < nr; i++) {
-		struct buffer_head *bh = bhs[i];
-
-		if (!trylock_buffer(bh))
-			continue;
-		if (op == REQ_OP_WRITE) {
-			if (test_clear_buffer_dirty(bh)) {
-				bh->b_end_io = end_buffer_write_sync;
-				get_bh(bh);
-				submit_bh(opf, bh);
-				continue;
-			}
-		} else {
-			if (!buffer_uptodate(bh)) {
-				bh->b_end_io = end_buffer_read_sync;
-				get_bh(bh);
-				submit_bh(opf, bh);
-				continue;
-			}
-		}
-		unlock_buffer(bh);
-	}
-}
-EXPORT_SYMBOL(ll_rw_block);
-
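
With the helper gone, a caller that still needs the non-waiting read side has to open-code the deleted loop body; a single-buffer sketch under that assumption (the function name is hypothetical):

static void read_buffer_nowait(struct buffer_head *bh, blk_opf_t opf)
{
	/* drop the buffer if someone else holds the lock */
	if (!trylock_buffer(bh))
		return;
	/* drop it if it is already up to date */
	if (buffer_uptodate(bh)) {
		unlock_buffer(bh);
		return;
	}
	bh->b_end_io = end_buffer_read_sync;
	get_bh(bh);	/* released by end_buffer_read_sync() */
	submit_bh(REQ_OP_READ | opf, bh);
}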
 void write_dirty_buffer(struct buffer_head *bh, blk_opf_t op_flags)
 {
 	lock_buffer(bh);
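
For write-side callers the conversion is roughly a one-line change, sketched below, but the semantics shift: ll_rw_block() silently dropped any buffer whose trylock failed, while write_dirty_buffer() begins with the blocking lock_buffer() shown above, so a briefly-locked buffer is written rather than skipped.

	/* before: skipped entirely if the buffer was locked or clean */
	ll_rw_block(REQ_OP_WRITE, 1, &bh);

	/* after: sleeps on the lock, then writes only if still dirty */
	write_dirty_buffer(bh, 0);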