@@ -36,15 +36,19 @@ static bool io_kbuf_inc_commit(struct io_buffer_list *bl, int len)
 {
 	while (len) {
 		struct io_uring_buf *buf;
-		u32 this_len;
+		u32 buf_len, this_len;
 
 		buf = io_ring_head_to_buf(bl->buf_ring, bl->head, bl->mask);
-		this_len = min_t(int, len, buf->len);
-		buf->len -= this_len;
-		if (buf->len) {
+		buf_len = READ_ONCE(buf->len);
+		this_len = min_t(u32, len, buf_len);
+		buf_len -= this_len;
+		/* Stop looping for invalid buffer length of 0 */
+		if (buf_len || !this_len) {
 			buf->addr += this_len;
+			buf->len = buf_len;
 			return false;
 		}
+		buf->len = 0;
 		bl->head++;
 		len -= this_len;
 	}
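
Why the snapshot matters in this hunk: buf->len lives in the buffer ring, which is mapped into and writable by userspace, so it can change between any two kernel reads. Loading it once with READ_ONCE() and doing all arithmetic on the local copy means a racing writer can only influence which value was snapshotted, not the consistency of the math, and the new !this_len test stops the loop on a zero-length entry instead of spinning on it. Below is a minimal userspace model of that snapshot-then-write-back pattern; READ_ONCE() is approximated with a volatile load and all names are illustrative, not the kernel's:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define READ_ONCE(x) (*(const volatile __typeof__(x) *)&(x))

static uint32_t shared_len;	/* stands in for buf->len in the shared ring */

/* Consume up to "want" bytes; returns true once the buffer is drained. */
static bool consume(uint32_t want)
{
	/* One load: the compare, the subtraction, and the write-back
	 * below all use this snapshot, never a fresh read. */
	uint32_t len = READ_ONCE(shared_len);
	uint32_t this_len = want < len ? want : len;

	len -= this_len;
	if (len || !this_len) {	/* partially used, or bogus zero-length entry */
		shared_len = len;
		return false;
	}
	shared_len = 0;
	return true;		/* fully drained: caller advances the ring head */
}

int main(void)
{
	shared_len = 8;
	printf("drained=%d left=%u\n", consume(5), shared_len); /* drained=0 left=3 */
	printf("drained=%d left=%u\n", consume(3), shared_len); /* drained=1 left=0 */
	return 0;
}
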
@@ -159,6 +163,7 @@ static void __user *io_ring_buffer_select(struct io_kiocb *req, size_t *len,
 	__u16 tail, head = bl->head;
 	struct io_uring_buf *buf;
 	void __user *ret;
+	u32 buf_len;
 
 	tail = smp_load_acquire(&br->tail);
 	if (unlikely(tail == head))
@@ -168,8 +173,9 @@ static void __user *io_ring_buffer_select(struct io_kiocb *req, size_t *len,
 		req->flags |= REQ_F_BL_EMPTY;
 
 	buf = io_ring_head_to_buf(br, head, bl->mask);
-	if (*len == 0 || *len > buf->len)
-		*len = buf->len;
+	buf_len = READ_ONCE(buf->len);
+	if (*len == 0 || *len > buf_len)
+		*len = buf_len;
 	req->flags |= REQ_F_BUFFER_RING | REQ_F_BUFFERS_COMMIT;
 	req->buf_list = bl;
 	req->buf_index = buf->bid;
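
The same rule applies in io_ring_buffer_select(): clamping *len against a value re-read from shared memory leaves a window in which userspace rewrites buf->len between the comparison and the assignment, so the bound that was checked is not the bound that gets applied. A stand-alone sketch of that window, with a simplified struct in place of the real io_uring types:

#include <stdint.h>

#define READ_ONCE(x) (*(const volatile __typeof__(x) *)&(x))

/* Simplified stand-in for a ring entry shared with userspace. */
struct ring_buf {
	uint64_t addr;
	uint32_t len;
};

/* Racy: the compiler may load b->len twice, so the bound that is
 * checked need not be the bound that is assigned. */
void clamp_racy(struct ring_buf *b, uint32_t *len)
{
	if (*len == 0 || *len > b->len)
		*len = b->len;
}

/* Fixed: one snapshot, so compare and assign agree by construction. */
void clamp_safe(struct ring_buf *b, uint32_t *len)
{
	uint32_t buf_len = READ_ONCE(b->len);

	if (*len == 0 || *len > buf_len)
		*len = buf_len;
}
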
@@ -265,7 +271,7 @@ static int io_ring_buffers_peek(struct io_kiocb *req, struct buf_sel_arg *arg,
 
 	req->buf_index = buf->bid;
 	do {
-		u32 len = buf->len;
+		u32 len = READ_ONCE(buf->len);
 
 		/* truncate end piece, if needed, for non partial buffers */
 		if (len > arg->max_len) {
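
io_ring_buffers_peek() follows suit: each loop iteration snapshots the entry's length exactly once before the truncation check against arg->max_len. A sketch of that per-iteration discipline over a power-of-two ring, modeled loosely on io_ring_head_to_buf() (simplified fields, illustrative names):

#include <stdint.h>

#define READ_ONCE(x) (*(const volatile __typeof__(x) *)&(x))

struct ring_buf {
	uint64_t addr;
	uint32_t len;
	uint16_t bid;
};

/* mask is ring_entries - 1, so "head & mask" wraps like the real ring. */
static inline struct ring_buf *head_to_buf(struct ring_buf *ring,
					   uint16_t head, uint16_t mask)
{
	return &ring[head & mask];
}

/* Sum readable bytes from head to tail, reading each shared length once
 * per iteration and truncating oversized entries, as the peek path does. */
uint64_t peek_bytes(struct ring_buf *ring, uint16_t head, uint16_t tail,
		    uint16_t mask, uint32_t max_len)
{
	uint64_t sum = 0;

	while (head != tail) {
		uint32_t len = READ_ONCE(head_to_buf(ring, head, mask)->len);

		if (len > max_len)
			len = max_len;
		sum += len;
		head++;
	}
	return sum;
}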