@@ -132,6 +132,14 @@ struct virtnet_interrupt_coalesce {
 	u32 max_usecs;
 };
 
+/* The dma information of pages allocated at a time. */
+struct virtnet_rq_dma {
+	dma_addr_t addr;
+	u32 ref;
+	u16 len;
+	u16 need_sync;
+};
+
 /* Internal representation of a send virtqueue */
 struct send_queue {
 	/* Virtqueue associated with this send_queue */
@@ -185,6 +193,12 @@ struct receive_queue {
 	char name[16];
 
 	struct xdp_rxq_info xdp_rxq;
+
+	/* Record the last dma info, to be freed after a new page is allocated. */
+	struct virtnet_rq_dma *last_dma;
+
+	/* Do DMA mapping ourselves. */
+	bool do_dma;
 };
 
 /* This structure can contain rss message with maximum settings for indirection table and keysize
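
For orientation (this note and the sketch below are not part of the diff): each receive page-frag page now begins with a struct virtnet_rq_dma header, and every buffer handed to the virtqueue is carved out of the area after that header, so any buffer pointer can be walked back to its DMA metadata via virt_to_head_page()/page_address(). A hypothetical helper mirroring the address math used later in virtnet_rq_init_one_sg() and virtnet_rq_unmap() would look roughly like this:

/* Illustration only, not in the patch: device address of a buffer inside a
 * premapped rx page. The page layout is
 *   [struct virtnet_rq_dma][buffer 0][buffer 1]...
 * and dma->addr maps the region that starts right after the header.
 */
static inline dma_addr_t virtnet_rq_buf_dma_addr(void *buf)
{
	void *head = page_address(virt_to_head_page(buf));
	struct virtnet_rq_dma *dma = head;

	return dma->addr + (buf - (head + sizeof(*dma)));
}
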
@@ -580,6 +594,156 @@ static struct sk_buff *page_to_skb(struct virtnet_info *vi,
 	return skb;
 }
 
+static void virtnet_rq_unmap(struct receive_queue *rq, void *buf, u32 len)
+{
+	struct page *page = virt_to_head_page(buf);
+	struct virtnet_rq_dma *dma;
+	void *head;
+	int offset;
+
+	head = page_address(page);
+
+	dma = head;
+
+	--dma->ref;
+
+	if (dma->ref) {
+		if (dma->need_sync && len) {
+			offset = buf - (head + sizeof(*dma));
+
+			virtqueue_dma_sync_single_range_for_cpu(rq->vq, dma->addr, offset,
+								len, DMA_FROM_DEVICE);
+		}
+
+		return;
+	}
+
+	virtqueue_dma_unmap_single_attrs(rq->vq, dma->addr, dma->len,
+					 DMA_FROM_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
+	put_page(page);
+}
+
+static void *virtnet_rq_get_buf(struct receive_queue *rq, u32 *len, void **ctx)
+{
+	void *buf;
+
+	buf = virtqueue_get_buf_ctx(rq->vq, len, ctx);
+	if (buf && rq->do_dma)
+		virtnet_rq_unmap(rq, buf, *len);
+
+	return buf;
+}
+
+static void *virtnet_rq_detach_unused_buf(struct receive_queue *rq)
+{
+	void *buf;
+
+	buf = virtqueue_detach_unused_buf(rq->vq);
+	if (buf && rq->do_dma)
+		virtnet_rq_unmap(rq, buf, 0);
+
+	return buf;
+}
+
+static void virtnet_rq_init_one_sg(struct receive_queue *rq, void *buf, u32 len)
+{
+	struct virtnet_rq_dma *dma;
+	dma_addr_t addr;
+	u32 offset;
+	void *head;
+
+	if (!rq->do_dma) {
+		sg_init_one(rq->sg, buf, len);
+		return;
+	}
+
+	head = page_address(rq->alloc_frag.page);
+
+	offset = buf - head;
+
+	dma = head;
+
+	addr = dma->addr - sizeof(*dma) + offset;
+
+	sg_init_table(rq->sg, 1);
+	rq->sg[0].dma_address = addr;
+	rq->sg[0].length = len;
+}
+
+static void *virtnet_rq_alloc(struct receive_queue *rq, u32 size, gfp_t gfp)
+{
+	struct page_frag *alloc_frag = &rq->alloc_frag;
+	struct virtnet_rq_dma *dma;
+	void *buf, *head;
+	dma_addr_t addr;
+
+	if (unlikely(!skb_page_frag_refill(size, alloc_frag, gfp)))
+		return NULL;
+
+	head = page_address(alloc_frag->page);
+
+	if (rq->do_dma) {
+		dma = head;
+
+		/* new pages */
+		if (!alloc_frag->offset) {
+			if (rq->last_dma) {
+				/* Now that a new page has been allocated, the
+				 * previous dma will get no new users, so it
+				 * can be unmapped once its ref reaches 0.
+				 */
+				virtnet_rq_unmap(rq, rq->last_dma, 0);
+				rq->last_dma = NULL;
+			}
+
+			dma->len = alloc_frag->size - sizeof(*dma);
+
+			addr = virtqueue_dma_map_single_attrs(rq->vq, dma + 1,
+							      dma->len, DMA_FROM_DEVICE, 0);
+			if (virtqueue_dma_mapping_error(rq->vq, addr))
+				return NULL;
+
+			dma->addr = addr;
+			dma->need_sync = virtqueue_dma_need_sync(rq->vq, addr);
+
+			/* Add a reference to dma to prevent the entire dma from
+			 * being released during error handling. This reference
+			 * will be dropped once the page is no longer used.
+			 */
+			get_page(alloc_frag->page);
+			dma->ref = 1;
+			alloc_frag->offset = sizeof(*dma);
+
+			rq->last_dma = dma;
+		}
+
+		++dma->ref;
+	}
+
+	buf = head + alloc_frag->offset;
+
+	get_page(alloc_frag->page);
+	alloc_frag->offset += size;
+
+	return buf;
+}
+
+static void virtnet_rq_set_premapped(struct virtnet_info *vi)
+{
+	int i;
+
+	/* disable for big mode */
+	if (!vi->mergeable_rx_bufs && vi->big_packets)
+		return;
+
+	for (i = 0; i < vi->max_queue_pairs; i++) {
+		if (virtqueue_set_dma_premapped(vi->rq[i].vq))
+			continue;
+
+		vi->rq[i].do_dma = true;
+	}
+}
+
 static void free_old_xmit_skbs(struct send_queue *sq, bool in_napi)
 {
 	unsigned int len;
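
Taken together (an aside, not part of the diff), the intended calling pattern is: virtnet_rq_alloc() maps a fresh page once and bumps dma->ref per buffer, virtnet_rq_init_one_sg() fills the scatterlist with the already-mapped address, and virtnet_rq_unmap() drops the reference on the receive or error path, unmapping the page only when its last buffer is gone. A condensed, hypothetical refill function built from those pieces (mirroring add_recvbuf_small() in a later hunk, with headroom and ctx handling omitted) might look like this:

/* Hypothetical sketch, not in the patch: refill one rx buffer using the
 * premapped helpers above.
 */
static int virtnet_rq_refill_one(struct receive_queue *rq, u32 len, gfp_t gfp)
{
	void *buf;
	int err;

	buf = virtnet_rq_alloc(rq, len, gfp);	/* maps the page on first use */
	if (unlikely(!buf))
		return -ENOMEM;

	virtnet_rq_init_one_sg(rq, buf, len);	/* uses the premapped dma address */

	err = virtqueue_add_inbuf_ctx(rq->vq, rq->sg, 1, buf, NULL, gfp);
	if (err < 0) {
		if (rq->do_dma)
			virtnet_rq_unmap(rq, buf, 0);	/* drop the per-buffer ref */
		put_page(virt_to_head_page(buf));
	}

	return err;
}
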
@@ -935,7 +1099,7 @@ static struct page *xdp_linearize_page(struct receive_queue *rq,
 	void *buf;
 	int off;
 
-	buf = virtqueue_get_buf(rq->vq, &buflen);
+	buf = virtnet_rq_get_buf(rq, &buflen, NULL);
 	if (unlikely(!buf))
 		goto err_buf;
 
@@ -1155,7 +1319,7 @@ static void mergeable_buf_free(struct receive_queue *rq, int num_buf,
 	int len;
 
 	while (num_buf-- > 1) {
-		buf = virtqueue_get_buf(rq->vq, &len);
+		buf = virtnet_rq_get_buf(rq, &len, NULL);
 		if (unlikely(!buf)) {
 			pr_debug("%s: rx error: %d buffers missing\n",
 				 dev->name, num_buf);
@@ -1263,7 +1427,7 @@ static int virtnet_build_xdp_buff_mrg(struct net_device *dev,
 		return -EINVAL;
 
 	while (--*num_buf > 0) {
-		buf = virtqueue_get_buf_ctx(rq->vq, &len, &ctx);
+		buf = virtnet_rq_get_buf(rq, &len, &ctx);
 		if (unlikely(!buf)) {
 			pr_debug("%s: rx error: %d buffers out of %d missing\n",
 				 dev->name, *num_buf,
@@ -1492,7 +1656,7 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
 	while (--num_buf) {
 		int num_skb_frags;
 
-		buf = virtqueue_get_buf_ctx(rq->vq, &len, &ctx);
+		buf = virtnet_rq_get_buf(rq, &len, &ctx);
 		if (unlikely(!buf)) {
 			pr_debug("%s: rx error: %d buffers out of %d missing\n",
 				 dev->name, num_buf,
@@ -1651,7 +1815,6 @@ static void receive_buf(struct virtnet_info *vi, struct receive_queue *rq,
 static int add_recvbuf_small(struct virtnet_info *vi, struct receive_queue *rq,
 			     gfp_t gfp)
 {
-	struct page_frag *alloc_frag = &rq->alloc_frag;
 	char *buf;
 	unsigned int xdp_headroom = virtnet_get_headroom(vi);
 	void *ctx = (void *)(unsigned long)xdp_headroom;
@@ -1660,17 +1823,21 @@ static int add_recvbuf_small(struct virtnet_info *vi, struct receive_queue *rq,
 
 	len = SKB_DATA_ALIGN(len) +
 	      SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
-	if (unlikely(!skb_page_frag_refill(len, alloc_frag, gfp)))
+
+	buf = virtnet_rq_alloc(rq, len, gfp);
+	if (unlikely(!buf))
 		return -ENOMEM;
 
-	buf = (char *)page_address(alloc_frag->page) + alloc_frag->offset;
-	get_page(alloc_frag->page);
-	alloc_frag->offset += len;
-	sg_init_one(rq->sg, buf + VIRTNET_RX_PAD + xdp_headroom,
-		    vi->hdr_len + GOOD_PACKET_LEN);
+	virtnet_rq_init_one_sg(rq, buf + VIRTNET_RX_PAD + xdp_headroom,
+			       vi->hdr_len + GOOD_PACKET_LEN);
+
 	err = virtqueue_add_inbuf_ctx(rq->vq, rq->sg, 1, buf, ctx, gfp);
-	if (err < 0)
+	if (err < 0) {
+		if (rq->do_dma)
+			virtnet_rq_unmap(rq, buf, 0);
 		put_page(virt_to_head_page(buf));
+	}
+
 	return err;
 }
 
@@ -1747,23 +1914,22 @@ static int add_recvbuf_mergeable(struct virtnet_info *vi,
 	unsigned int headroom = virtnet_get_headroom(vi);
 	unsigned int tailroom = headroom ? sizeof(struct skb_shared_info) : 0;
 	unsigned int room = SKB_DATA_ALIGN(headroom + tailroom);
-	char *buf;
+	unsigned int len, hole;
 	void *ctx;
+	char *buf;
 	int err;
-	unsigned int len, hole;
 
 	/* Extra tailroom is needed to satisfy XDP's assumption. This
 	 * means rx frags coalescing won't work, but consider we've
 	 * disabled GSO for XDP, it won't be a big issue.
 	 */
 	len = get_mergeable_buf_len(rq, &rq->mrg_avg_pkt_len, room);
-	if (unlikely(!skb_page_frag_refill(len + room, alloc_frag, gfp)))
+
+	buf = virtnet_rq_alloc(rq, len + room, gfp);
+	if (unlikely(!buf))
 		return -ENOMEM;
 
-	buf = (char *)page_address(alloc_frag->page) + alloc_frag->offset;
 	buf += headroom; /* advance address leaving hole at front of pkt */
-	get_page(alloc_frag->page);
-	alloc_frag->offset += len + room;
 	hole = alloc_frag->size - alloc_frag->offset;
 	if (hole < len + room) {
 		/* To avoid internal fragmentation, if there is very likely not
@@ -1777,11 +1943,15 @@ static int add_recvbuf_mergeable(struct virtnet_info *vi,
 		alloc_frag->offset += hole;
 	}
 
-	sg_init_one(rq->sg, buf, len);
+	virtnet_rq_init_one_sg(rq, buf, len);
+
 	ctx = mergeable_len_to_ctx(len + room, headroom);
 	err = virtqueue_add_inbuf_ctx(rq->vq, rq->sg, 1, buf, ctx, gfp);
-	if (err < 0)
+	if (err < 0) {
+		if (rq->do_dma)
+			virtnet_rq_unmap(rq, buf, 0);
 		put_page(virt_to_head_page(buf));
+	}
 
 	return err;
 }
@@ -1902,13 +2072,13 @@ static int virtnet_receive(struct receive_queue *rq, int budget,
 		void *ctx;
 
 		while (stats.packets < budget &&
-		       (buf = virtqueue_get_buf_ctx(rq->vq, &len, &ctx))) {
+		       (buf = virtnet_rq_get_buf(rq, &len, &ctx))) {
 			receive_buf(vi, rq, buf, len, ctx, xdp_xmit, &stats);
 			stats.packets++;
 		}
 	} else {
 		while (stats.packets < budget &&
-		       (buf = virtqueue_get_buf(rq->vq, &len)) != NULL) {
+		       (buf = virtnet_rq_get_buf(rq, &len, NULL)) != NULL) {
 			receive_buf(vi, rq, buf, len, NULL, xdp_xmit, &stats);
 			stats.packets++;
 		}
@@ -3808,8 +3978,11 @@ static void free_receive_page_frags(struct virtnet_info *vi)
 {
 	int i;
 	for (i = 0; i < vi->max_queue_pairs; i++)
-		if (vi->rq[i].alloc_frag.page)
+		if (vi->rq[i].alloc_frag.page) {
+			if (vi->rq[i].do_dma && vi->rq[i].last_dma)
+				virtnet_rq_unmap(&vi->rq[i], vi->rq[i].last_dma, 0);
 			put_page(vi->rq[i].alloc_frag.page);
+		}
 }
 
 static void virtnet_sq_free_unused_buf(struct virtqueue *vq, void *buf)
@@ -3846,9 +4019,10 @@ static void free_unused_bufs(struct virtnet_info *vi)
 	}
 
 	for (i = 0; i < vi->max_queue_pairs; i++) {
-		struct virtqueue *vq = vi->rq[i].vq;
-		while ((buf = virtqueue_detach_unused_buf(vq)) != NULL)
-			virtnet_rq_free_unused_buf(vq, buf);
+		struct receive_queue *rq = &vi->rq[i];
+
+		while ((buf = virtnet_rq_detach_unused_buf(rq)) != NULL)
+			virtnet_rq_free_unused_buf(rq->vq, buf);
 		cond_resched();
 	}
 }
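
As an aside (not part of the diff): because virtnet_rq_alloc() keeps an extra reference on the most recently mapped page via rq->last_dma, teardown must drop that reference before releasing the page frag, after any buffers still sitting in the virtqueue have been detached and unmapped. A hypothetical per-queue cleanup helper condensed from the two hunks above would be roughly:

/* Hypothetical sketch, not in the patch: per-rq cleanup order for premapped
 * mode, condensed from free_unused_bufs() and free_receive_page_frags().
 */
static void virtnet_rq_teardown(struct receive_queue *rq)
{
	void *buf;

	while ((buf = virtnet_rq_detach_unused_buf(rq)) != NULL)
		virtnet_rq_free_unused_buf(rq->vq, buf);

	if (rq->alloc_frag.page) {
		if (rq->do_dma && rq->last_dma)
			virtnet_rq_unmap(rq, rq->last_dma, 0);
		put_page(rq->alloc_frag.page);
	}
}
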
@@ -4022,6 +4196,8 @@ static int init_vqs(struct virtnet_info *vi)
 	if (ret)
 		goto err_free;
 
+	virtnet_rq_set_premapped(vi);
+
 	cpus_read_lock();
 	virtnet_set_affinity(vi);
 	cpus_read_unlock();