@@ -907,8 +907,10 @@ static int smbd_post_send_iter(struct smbd_connection *info,
 		.local_dma_lkey = sc->ib.pd->local_dma_lkey,
 		.direction = DMA_TO_DEVICE,
 	};
+	size_t payload_len = umin(*_remaining_data_length,
+				  sp->max_send_size - sizeof(*packet));
 
-	rc = smb_extract_iter_to_rdma(iter, *_remaining_data_length,
+	rc = smb_extract_iter_to_rdma(iter, payload_len,
 				      &extract);
 	if (rc < 0)
 		goto err_dma;
@@ -1013,6 +1015,27 @@ static int smbd_post_send_empty(struct smbd_connection *info)
 	return smbd_post_send_iter(info, NULL, &remaining_data_length);
 }
 
+static int smbd_post_send_full_iter(struct smbd_connection *info,
+				    struct iov_iter *iter,
+				    int *_remaining_data_length)
+{
+	int rc = 0;
+
+	/*
+	 * smbd_post_send_iter() respects the
+	 * negotiated max_send_size, so we need to
+	 * loop until the full iter is posted
+	 */
+
+	while (iov_iter_count(iter) > 0) {
+		rc = smbd_post_send_iter(info, iter, _remaining_data_length);
+		if (rc < 0)
+			break;
+	}
+
+	return rc;
+}
+
 /*
  * Post a receive request to the transport
  * The remote peer can only send data when a receive request is posted
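
The helper terminates because each successful smbd_post_send_iter() call advances the iterator by however much it posted (at most the payload_len clamp from the first hunk), so iov_iter_count() shrinks on every pass. Below is a standalone userspace sketch of the same chunked-posting pattern; post_chunk(), post_full() and the 1364-byte budget are hypothetical stand-ins, not kernel APIs.

/*
 * Illustration only: drain a buffer in chunks no larger than a
 * per-message budget, mirroring the loop in smbd_post_send_full_iter().
 */
#include <stddef.h>
#include <stdio.h>

#define PER_POST_BUDGET 1364	/* assumed max_send_size minus packet header */

static size_t post_chunk(const char *buf, size_t len)
{
	size_t chunk = len < PER_POST_BUDGET ? len : PER_POST_BUDGET;

	(void)buf;			/* a real transport would post these bytes */
	printf("posted %zu bytes\n", chunk);
	return chunk;
}

static void post_full(const char *buf, size_t len)
{
	while (len > 0) {
		size_t sent = post_chunk(buf, len);

		buf += sent;
		len -= sent;
	}
}

int main(void)
{
	static const char payload[4096];

	post_full(payload, sizeof(payload));	/* emits four posts */
	return 0;
}
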
@@ -1452,6 +1475,9 @@ static int allocate_caches_and_workqueue(struct smbd_connection *info)
 	char name[MAX_NAME_LEN];
 	int rc;
 
+	if (WARN_ON_ONCE(sp->max_recv_size < sizeof(struct smbdirect_data_transfer)))
+		return -ENOMEM;
+
 	scnprintf(name, MAX_NAME_LEN, "smbd_request_%p", info);
 	info->request_cache =
 		kmem_cache_create(
@@ -1469,12 +1495,17 @@ static int allocate_caches_and_workqueue(struct smbd_connection *info)
 		goto out1;
 
 	scnprintf(name, MAX_NAME_LEN, "smbd_response_%p", info);
+
+	struct kmem_cache_args response_args = {
+		.align		= __alignof__(struct smbd_response),
+		.useroffset	= (offsetof(struct smbd_response, packet) +
+				   sizeof(struct smbdirect_data_transfer)),
+		.usersize	= sp->max_recv_size - sizeof(struct smbdirect_data_transfer),
+	};
 	info->response_cache =
-		kmem_cache_create(
-			name,
-			sizeof(struct smbd_response) +
-				sp->max_recv_size,
-			0, SLAB_HWCACHE_ALIGN, NULL);
+		kmem_cache_create(name,
+				  sizeof(struct smbd_response) + sp->max_recv_size,
+				  &response_args, SLAB_HWCACHE_ALIGN);
 	if (!info->response_cache)
 		goto out2;
 
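
The response cache now describes its layout through struct kmem_cache_args, so hardened usercopy whitelists only the payload bytes that follow the smbdirect_data_transfer header inside each object instead of the whole allocation. Below is a toy userspace sketch of the same useroffset/usersize arithmetic; toy_header, toy_response and MAX_RECV_SIZE are simplified stand-ins for the real kernel structures and the negotiated receive size.

#include <stddef.h>
#include <stdio.h>

#define MAX_RECV_SIZE 8192		/* assumed negotiated max_recv_size */

struct toy_header {			/* stands in for smbdirect_data_transfer */
	unsigned short credits_requested;
	unsigned int data_length;
};

struct toy_response {			/* stands in for smbd_response */
	int first_segment;
	unsigned char packet[];		/* wire header + payload follow here */
};

int main(void)
{
	size_t useroffset = offsetof(struct toy_response, packet) +
			    sizeof(struct toy_header);
	size_t usersize = MAX_RECV_SIZE - sizeof(struct toy_header);

	/* only [useroffset, useroffset + usersize) of each cached object
	 * may be copied to or from user space */
	printf("usercopy window: offset=%zu size=%zu\n", useroffset, usersize);
	return 0;
}
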
@@ -1747,35 +1778,39 @@ struct smbd_connection *smbd_get_connection(
 }
 
 /*
- * Receive data from receive reassembly queue
+ * Receive data from the transport's receive reassembly queue
  * All the incoming data packets are placed in reassembly queue
- * buf: the buffer to read data into
+ * iter: the buffer to read data into
  * size: the length of data to read
  * return value: actual data read
- * Note: this implementation copies the data from reassebmly queue to receive
+ *
+ * Note: this implementation copies the data from reassembly queue to receive
  * buffers used by upper layer. This is not the optimal code path. A better way
  * to do it is to not have upper layer allocate its receive buffers but rather
  * borrow the buffer from reassembly queue, and return it after data is
  * consumed. But this will require more changes to upper layer code, and also
  * need to consider packet boundaries while they still being reassembled.
  */
-static int smbd_recv_buf(struct smbd_connection *info, char *buf,
-		unsigned int size)
+int smbd_recv(struct smbd_connection *info, struct msghdr *msg)
 {
 	struct smbdirect_socket *sc = &info->socket;
 	struct smbd_response *response;
 	struct smbdirect_data_transfer *data_transfer;
+	size_t size = iov_iter_count(&msg->msg_iter);
 	int to_copy, to_read, data_read, offset;
 	u32 data_length, remaining_data_length, data_offset;
 	int rc;
 
+	if (WARN_ON_ONCE(iov_iter_rw(&msg->msg_iter) == WRITE))
+		return -EINVAL; /* It's a bug in upper layer to get there */
+
 again:
 	/*
 	 * No need to hold the reassembly queue lock all the time as we are
 	 * the only one reading from the front of the queue. The transport
 	 * may add more entries to the back of the queue at the same time
 	 */
-	log_read(INFO, "size=%d info->reassembly_data_length=%d\n", size,
+	log_read(INFO, "size=%zd info->reassembly_data_length=%d\n", size,
 		info->reassembly_data_length);
 	if (info->reassembly_data_length >= size) {
 		int queue_length;
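
As the rewritten comment notes, receive still copies out of the transport-owned reassembly queue into caller memory; the only change is that the destination is now the msghdr's iov_iter rather than a raw char buffer. Below is a standalone sketch of that copy-out model, where the segment array and recv_from_queue() are hypothetical stand-ins for the reassembly queue walk in smbd_recv().

#include <stddef.h>
#include <stdio.h>
#include <string.h>

struct segment {
	const char *data;
	size_t len;
	size_t offset;			/* bytes already handed to the caller */
};

/* copy up to 'size' bytes out of a queue of received segments,
 * consuming segments partially or fully as smbd_recv() does */
static size_t recv_from_queue(struct segment *q, size_t nsegs,
			      char *dst, size_t size)
{
	size_t done = 0, i = 0;

	while (done < size && i < nsegs) {
		struct segment *s = &q[i];
		size_t avail = s->len - s->offset;
		size_t to_copy = avail < size - done ? avail : size - done;

		memcpy(dst + done, s->data + s->offset, to_copy);
		s->offset += to_copy;
		done += to_copy;
		if (s->offset == s->len)
			i++;		/* segment drained, move to the next */
	}
	return done;
}

int main(void)
{
	struct segment q[] = {
		{ "first segment ", 14, 0 },
		{ "second segment", 14, 0 },
	};
	char out[28];
	size_t n = recv_from_queue(q, 2, out, sizeof(out));

	printf("read %zu bytes: %.28s\n", n, out);
	return 0;
}
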
@@ -1813,7 +1848,10 @@ static int smbd_recv_buf(struct smbd_connection *info, char *buf,
 		if (response->first_segment && size == 4) {
 			unsigned int rfc1002_len =
 				data_length + remaining_data_length;
-			*((__be32 *)buf) = cpu_to_be32(rfc1002_len);
+			__be32 rfc1002_hdr = cpu_to_be32(rfc1002_len);
+			if (copy_to_iter(&rfc1002_hdr, sizeof(rfc1002_hdr),
+					 &msg->msg_iter) != sizeof(rfc1002_hdr))
+				return -EFAULT;
 			data_read = 4;
 			response->first_segment = false;
 			log_read(INFO, "returning rfc1002 length %d\n",
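
For the first segment of a message, the four bytes returned to the caller are a synthetic RFC 1002 length prefix: data_length + remaining_data_length encoded big-endian, which is exactly what the cpu_to_be32()/copy_to_iter() pair above produces, so the upper layer can parse the stream as if it were RFC 1002 framed. A minimal userspace sketch of that encoding, with htonl() standing in for cpu_to_be32() and a plain buffer standing in for the iov_iter:

#include <arpa/inet.h>
#include <stdint.h>
#include <string.h>

/* write the 4-byte big-endian RFC 1002 length prefix into hdr */
static void put_rfc1002_len(uint8_t hdr[4], uint32_t data_length,
			    uint32_t remaining_data_length)
{
	uint32_t be = htonl(data_length + remaining_data_length);

	memcpy(hdr, &be, sizeof(be));
}
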
@@ -1822,10 +1860,9 @@ static int smbd_recv_buf(struct smbd_connection *info, char *buf,
 		}
 
 		to_copy = min_t(int, data_length - offset, to_read);
-		memcpy(
-			buf + data_read,
-			(char *)data_transfer + data_offset + offset,
-			to_copy);
+		if (copy_to_iter((char *)data_transfer + data_offset + offset,
+				 to_copy, &msg->msg_iter) != to_copy)
+			return -EFAULT;
 
 		/* move on to the next buffer? */
 		if (to_copy == data_length - offset) {
@@ -1890,90 +1927,6 @@ static int smbd_recv_buf(struct smbd_connection *info, char *buf,
 	goto again;
 }
 
-/*
- * Receive a page from receive reassembly queue
- * page: the page to read data into
- * to_read: the length of data to read
- * return value: actual data read
- */
-static int smbd_recv_page(struct smbd_connection *info,
-		struct page *page, unsigned int page_offset,
-		unsigned int to_read)
-{
-	struct smbdirect_socket *sc = &info->socket;
-	int ret;
-	char *to_address;
-	void *page_address;
-
-	/* make sure we have the page ready for read */
-	ret = wait_event_interruptible(
-		info->wait_reassembly_queue,
-		info->reassembly_data_length >= to_read ||
-			sc->status != SMBDIRECT_SOCKET_CONNECTED);
-	if (ret)
-		return ret;
-
-	/* now we can read from reassembly queue and not sleep */
-	page_address = kmap_atomic(page);
-	to_address = (char *) page_address + page_offset;
-
-	log_read(INFO, "reading from page=%p address=%p to_read=%d\n",
-		page, to_address, to_read);
-
-	ret = smbd_recv_buf(info, to_address, to_read);
-	kunmap_atomic(page_address);
-
-	return ret;
-}
-
-/*
- * Receive data from transport
- * msg: a msghdr point to the buffer, can be ITER_KVEC or ITER_BVEC
- * return: total bytes read, or 0. SMB Direct will not do partial read.
- */
-int smbd_recv(struct smbd_connection *info, struct msghdr *msg)
-{
-	char *buf;
-	struct page *page;
-	unsigned int to_read, page_offset;
-	int rc;
-
-	if (iov_iter_rw(&msg->msg_iter) == WRITE) {
-		/* It's a bug in upper layer to get there */
-		cifs_dbg(VFS, "Invalid msg iter dir %u\n",
-			 iov_iter_rw(&msg->msg_iter));
-		rc = -EINVAL;
-		goto out;
-	}
-
-	switch (iov_iter_type(&msg->msg_iter)) {
-	case ITER_KVEC:
-		buf = msg->msg_iter.kvec->iov_base;
-		to_read = msg->msg_iter.kvec->iov_len;
-		rc = smbd_recv_buf(info, buf, to_read);
-		break;
-
-	case ITER_BVEC:
-		page = msg->msg_iter.bvec->bv_page;
-		page_offset = msg->msg_iter.bvec->bv_offset;
-		to_read = msg->msg_iter.bvec->bv_len;
-		rc = smbd_recv_page(info, page, page_offset, to_read);
-		break;
-
-	default:
-		/* It's a bug in upper layer to get there */
-		cifs_dbg(VFS, "Invalid msg type %d\n",
-			 iov_iter_type(&msg->msg_iter));
-		rc = -EINVAL;
-	}
-
-out:
-	/* SMBDirect will read it all or nothing */
-	if (rc > 0)
-		msg->msg_iter.count = 0;
-	return rc;
-}
-
 /*
  * Send data to transport
  * Each rqst is transported as a SMBDirect payload
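
With smbd_recv() consuming the msghdr's iterator directly, the ITER_KVEC/ITER_BVEC dispatch and the kmap-based smbd_recv_page() helper removed above become unnecessary; any iterator the caller builds flows through the same copy_to_iter() path. A hedged fragment showing how a caller might hand a kvec-backed msghdr to the new entry point (illustrative only, not the actual cifs call site):

	/* illustrative fragment, not taken from the cifs caller */
	struct kvec iov = { .iov_base = buf, .iov_len = len };
	struct msghdr msg = {};
	int rc;

	iov_iter_kvec(&msg.msg_iter, ITER_DEST, &iov, 1, len);
	rc = smbd_recv(info, &msg);	/* reads all of 'len' or fails */
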
@@ -2032,14 +1985,14 @@ int smbd_send(struct TCP_Server_Info *server,
 			klen += rqst->rq_iov[i].iov_len;
 		iov_iter_kvec(&iter, ITER_SOURCE, rqst->rq_iov, rqst->rq_nvec, klen);
 
-		rc = smbd_post_send_iter(info, &iter, &remaining_data_length);
+		rc = smbd_post_send_full_iter(info, &iter, &remaining_data_length);
 		if (rc < 0)
 			break;
 
 		if (iov_iter_count(&rqst->rq_iter) > 0) {
 			/* And then the data pages if there are any */
-			rc = smbd_post_send_iter(info, &rqst->rq_iter,
-						 &remaining_data_length);
+			rc = smbd_post_send_full_iter(info, &rqst->rq_iter,
+						      &remaining_data_length);
 			if (rc < 0)
 				break;
 		}