@@ -1362,6 +1362,82 @@ static int io_estimate_bvec_size(struct iovec *iov, unsigned nr_iovs,
 	return max_segs;
 }
 
+static int io_vec_fill_kern_bvec(int ddir, struct iov_iter *iter,
+				 struct io_mapped_ubuf *imu,
+				 struct iovec *iovec, unsigned nr_iovs,
+				 struct iou_vec *vec)
+{
+	const struct bio_vec *src_bvec = imu->bvec;
+	struct bio_vec *res_bvec = vec->bvec;
+	unsigned res_idx = 0;
+	size_t total_len = 0;
+	unsigned iov_idx;
+
+	for (iov_idx = 0; iov_idx < nr_iovs; iov_idx++) {
+		size_t offset = (size_t)(uintptr_t)iovec[iov_idx].iov_base;
+		size_t iov_len = iovec[iov_idx].iov_len;
+		struct bvec_iter bi = {
+			.bi_size = offset + iov_len,
+		};
+		struct bio_vec bv;
+
+		bvec_iter_advance(src_bvec, &bi, offset);
+		for_each_mp_bvec(bv, src_bvec, bi, bi)
+			res_bvec[res_idx++] = bv;
+		total_len += iov_len;
+	}
+	iov_iter_bvec(iter, ddir, res_bvec, res_idx, total_len);
+	return 0;
+}
+
+static int iov_kern_bvec_size(const struct iovec *iov,
+			      const struct io_mapped_ubuf *imu,
+			      unsigned int *nr_seg)
+{
+	size_t offset = (size_t)(uintptr_t)iov->iov_base;
+	const struct bio_vec *bvec = imu->bvec;
+	int start = 0, i = 0;
+	size_t off = 0;
+	int ret;
+
+	ret = validate_fixed_range(offset, iov->iov_len, imu);
+	if (unlikely(ret))
+		return ret;
+
+	for (i = 0; off < offset + iov->iov_len && i < imu->nr_bvecs;
+			off += bvec[i].bv_len, i++) {
+		if (offset >= off && offset < off + bvec[i].bv_len)
+			start = i;
+	}
+	*nr_seg = i - start;
+	return 0;
+}
+
+static int io_kern_bvec_size(struct iovec *iov, unsigned nr_iovs,
+			     struct io_mapped_ubuf *imu, unsigned *nr_segs)
+{
+	unsigned max_segs = 0;
+	size_t total_len = 0;
+	unsigned i;
+	int ret;
+
+	*nr_segs = 0;
+	for (i = 0; i < nr_iovs; i++) {
+		if (unlikely(!iov[i].iov_len))
+			return -EFAULT;
+		if (unlikely(check_add_overflow(total_len, iov[i].iov_len,
+						&total_len)))
+			return -EOVERFLOW;
+		ret = iov_kern_bvec_size(&iov[i], imu, &max_segs);
+		if (unlikely(ret))
+			return ret;
+		*nr_segs += max_segs;
+	}
+	if (total_len > MAX_RW_COUNT)
+		return -EINVAL;
+	return 0;
+}
+
 int io_import_reg_vec(int ddir, struct iov_iter *iter,
 			struct io_kiocb *req, struct iou_vec *vec,
 			unsigned nr_iovs, unsigned issue_flags)
@@ -1376,14 +1452,20 @@ int io_import_reg_vec(int ddir, struct iov_iter *iter,
 	if (!node)
 		return -EFAULT;
 	imu = node->buf;
-	if (imu->is_kbuf)
-		return -EOPNOTSUPP;
 	if (!(imu->dir & (1 << ddir)))
 		return -EFAULT;
 
 	iovec_off = vec->nr - nr_iovs;
 	iov = vec->iovec + iovec_off;
-	nr_segs = io_estimate_bvec_size(iov, nr_iovs, imu);
+
+	if (imu->is_kbuf) {
+		int ret = io_kern_bvec_size(iov, nr_iovs, imu, &nr_segs);
+
+		if (unlikely(ret))
+			return ret;
+	} else {
+		nr_segs = io_estimate_bvec_size(iov, nr_iovs, imu);
+	}
 
 	if (sizeof(struct bio_vec) > sizeof(struct iovec)) {
 		size_t bvec_bytes;
@@ -1410,6 +1492,9 @@ int io_import_reg_vec(int ddir, struct iov_iter *iter,
 		req->flags |= REQ_F_NEED_CLEANUP;
 	}
 
+	if (imu->is_kbuf)
+		return io_vec_fill_kern_bvec(ddir, iter, imu, iov, nr_iovs, vec);
+
 	return io_vec_fill_bvec(ddir, iter, imu, iov, nr_iovs, vec);
 }
 