2828
2929#include "netdevsim.h"
3030
31+ #define NSIM_RING_SIZE 256
32+
33+ static int nsim_napi_rx (struct nsim_rq * rq , struct sk_buff * skb )
34+ {
35+ if (skb_queue_len (& rq -> skb_queue ) > NSIM_RING_SIZE ) {
36+ dev_kfree_skb_any (skb );
37+ return NET_RX_DROP ;
38+ }
39+
40+ skb_queue_tail (& rq -> skb_queue , skb );
41+ return NET_RX_SUCCESS ;
42+ }
43+
/* Forward @skb to the peer device @dev and, on success, enqueue it on
 * the peer's receive queue @rq.  Returns NET_RX_SUCCESS or NET_RX_DROP;
 * the skb is consumed on every path.
 */
static int nsim_forward_skb(struct net_device *dev, struct sk_buff *skb,
			    struct nsim_rq *rq)
{
	int ret;

	/* NOTE(review): on a non-zero return __dev_forward_skb() is
	 * presumed to have disposed of the skb itself — confirm against
	 * its definition.
	 */
	ret = __dev_forward_skb(dev, skb);
	if (ret)
		return ret;

	return nsim_napi_rx(rq, skb);
}
49+
3150static netdev_tx_t nsim_start_xmit (struct sk_buff * skb , struct net_device * dev )
3251{
3352 struct netdevsim * ns = netdev_priv (dev );
53+ struct net_device * peer_dev ;
3454 unsigned int len = skb -> len ;
3555 struct netdevsim * peer_ns ;
56+ struct nsim_rq * rq ;
57+ int rxq ;
3658
3759 rcu_read_lock ();
3860 if (!nsim_ipsec_tx (ns , skb ))
@@ -42,10 +64,18 @@ static netdev_tx_t nsim_start_xmit(struct sk_buff *skb, struct net_device *dev)
4264 if (!peer_ns )
4365 goto out_drop_free ;
4466
67+ peer_dev = peer_ns -> netdev ;
68+ rxq = skb_get_queue_mapping (skb );
69+ if (rxq >= peer_dev -> num_rx_queues )
70+ rxq = rxq % peer_dev -> num_rx_queues ;
71+ rq = & peer_ns -> rq [rxq ];
72+
4573 skb_tx_timestamp (skb );
46- if (unlikely (dev_forward_skb ( peer_ns -> netdev , skb ) == NET_RX_DROP ))
74+ if (unlikely (nsim_forward_skb ( peer_dev , skb , rq ) == NET_RX_DROP ))
4775 goto out_drop_cnt ;
4876
77+ napi_schedule (& rq -> napi );
78+
4979 rcu_read_unlock ();
5080 u64_stats_update_begin (& ns -> syncp );
5181 ns -> tx_packets ++ ;
@@ -300,25 +330,146 @@ static int nsim_get_iflink(const struct net_device *dev)
300330 return iflink ;
301331}
302332
333+ static int nsim_rcv (struct nsim_rq * rq , int budget )
334+ {
335+ struct sk_buff * skb ;
336+ int i ;
337+
338+ for (i = 0 ; i < budget ; i ++ ) {
339+ if (skb_queue_empty (& rq -> skb_queue ))
340+ break ;
341+
342+ skb = skb_dequeue (& rq -> skb_queue );
343+ netif_receive_skb (skb );
344+ }
345+
346+ return i ;
347+ }
348+
349+ static int nsim_poll (struct napi_struct * napi , int budget )
350+ {
351+ struct nsim_rq * rq = container_of (napi , struct nsim_rq , napi );
352+ int done ;
353+
354+ done = nsim_rcv (rq , budget );
355+ napi_complete (napi );
356+
357+ return done ;
358+ }
359+
360+ static int nsim_create_page_pool (struct nsim_rq * rq )
361+ {
362+ struct page_pool_params p = {
363+ .order = 0 ,
364+ .pool_size = NSIM_RING_SIZE ,
365+ .nid = NUMA_NO_NODE ,
366+ .dev = & rq -> napi .dev -> dev ,
367+ .napi = & rq -> napi ,
368+ .dma_dir = DMA_BIDIRECTIONAL ,
369+ .netdev = rq -> napi .dev ,
370+ };
371+
372+ rq -> page_pool = page_pool_create (& p );
373+ if (IS_ERR (rq -> page_pool )) {
374+ int err = PTR_ERR (rq -> page_pool );
375+
376+ rq -> page_pool = NULL ;
377+ return err ;
378+ }
379+ return 0 ;
380+ }
381+
/* Register a NAPI instance and create a page pool for every RX queue
 * of @ns.  On failure, unwinds in reverse: destroys only the pools
 * created so far, then deletes every NAPI instance (all of them were
 * added by the first loop).  Returns 0 or a negative errno.
 */
static int nsim_init_napi(struct netdevsim *ns)
{
	struct net_device *dev = ns->netdev;
	struct nsim_rq *rq;
	int err, i;

	for (i = 0; i < dev->num_rx_queues; i++) {
		rq = &ns->rq[i];

		netif_napi_add(dev, &rq->napi, nsim_poll);
	}

	for (i = 0; i < dev->num_rx_queues; i++) {
		rq = &ns->rq[i];

		err = nsim_create_page_pool(rq);
		if (err)
			goto err_pp_destroy;
	}

	return 0;

err_pp_destroy:
	/* i indexes the queue whose pool creation failed; destroy only
	 * the i pools that were successfully created before it.
	 */
	while (i--) {
		page_pool_destroy(ns->rq[i].page_pool);
		ns->rq[i].page_pool = NULL;
	}

	/* NAPI was added for every queue, so delete all of them. */
	for (i = 0; i < dev->num_rx_queues; i++)
		__netif_napi_del(&ns->rq[i].napi);

	return err;
}
415+
416+ static void nsim_enable_napi (struct netdevsim * ns )
417+ {
418+ struct net_device * dev = ns -> netdev ;
419+ int i ;
420+
421+ for (i = 0 ; i < dev -> num_rx_queues ; i ++ ) {
422+ struct nsim_rq * rq = & ns -> rq [i ];
423+
424+ netif_queue_set_napi (dev , i , NETDEV_QUEUE_TYPE_RX , & rq -> napi );
425+ napi_enable (& rq -> napi );
426+ }
427+ }
428+
/* ndo_open: set up per-queue NAPI state, then enable every instance.
 * Returns 0 on success or the error from nsim_init_napi().
 */
static int nsim_open(struct net_device *dev)
{
	struct netdevsim *ns = netdev_priv(dev);
	int err;

	err = nsim_init_napi(ns);
	if (!err)
		nsim_enable_napi(ns);

	return err;
}
442+
/* Tear down NAPI state for every RX queue of @ns.
 * Order matters: disable and delete all NAPI instances first, wait for
 * in-flight readers with synchronize_net(), and only then destroy the
 * page pools those instances referenced.
 */
static void nsim_del_napi(struct netdevsim *ns)
{
	struct net_device *dev = ns->netdev;
	int i;

	for (i = 0; i < dev->num_rx_queues; i++) {
		struct nsim_rq *rq = &ns->rq[i];

		napi_disable(&rq->napi);
		__netif_napi_del(&rq->napi);
	}
	/* grace period before freeing resources the NAPIs pointed at */
	synchronize_net();

	for (i = 0; i < dev->num_rx_queues; i++) {
		page_pool_destroy(ns->rq[i].page_pool);
		ns->rq[i].page_pool = NULL;
	}
}
316461
317462static int nsim_stop (struct net_device * dev )
318463{
319464 struct netdevsim * ns = netdev_priv (dev );
465+ struct netdevsim * peer ;
466+
467+ netif_carrier_off (dev );
468+ peer = rtnl_dereference (ns -> peer );
469+ if (peer )
470+ netif_carrier_off (peer -> netdev );
320471
321- page_pool_destroy (ns -> pp );
472+ nsim_del_napi (ns );
322473
323474 return 0 ;
324475}
@@ -437,7 +588,7 @@ nsim_pp_hold_write(struct file *file, const char __user *data,
437588 if (!netif_running (ns -> netdev ) && val ) {
438589 ret = - ENETDOWN ;
439590 } else if (val ) {
440- ns -> page = page_pool_dev_alloc_pages (ns -> pp );
591+ ns -> page = page_pool_dev_alloc_pages (ns -> rq [ 0 ]. page_pool );
441592 if (!ns -> page )
442593 ret = - ENOMEM ;
443594 } else {
@@ -477,6 +628,35 @@ static void nsim_setup(struct net_device *dev)
477628 dev -> xdp_features = NETDEV_XDP_ACT_HW_OFFLOAD ;
478629}
479630
631+ static int nsim_queue_init (struct netdevsim * ns )
632+ {
633+ struct net_device * dev = ns -> netdev ;
634+ int i ;
635+
636+ ns -> rq = kvcalloc (dev -> num_rx_queues , sizeof (* ns -> rq ),
637+ GFP_KERNEL_ACCOUNT | __GFP_RETRY_MAYFAIL );
638+ if (!ns -> rq )
639+ return - ENOMEM ;
640+
641+ for (i = 0 ; i < dev -> num_rx_queues ; i ++ )
642+ skb_queue_head_init (& ns -> rq [i ].skb_queue );
643+
644+ return 0 ;
645+ }
646+
647+ static void nsim_queue_free (struct netdevsim * ns )
648+ {
649+ struct net_device * dev = ns -> netdev ;
650+ int i ;
651+
652+ for (i = 0 ; i < dev -> num_rx_queues ; i ++ )
653+ skb_queue_purge_reason (& ns -> rq [i ].skb_queue ,
654+ SKB_DROP_REASON_QUEUE_PURGE );
655+
656+ kvfree (ns -> rq );
657+ ns -> rq = NULL ;
658+ }
659+
480660static int nsim_init_netdevsim (struct netdevsim * ns )
481661{
482662 struct mock_phc * phc ;
@@ -495,10 +675,14 @@ static int nsim_init_netdevsim(struct netdevsim *ns)
495675 goto err_phc_destroy ;
496676
497677 rtnl_lock ();
498- err = nsim_bpf_init (ns );
678+ err = nsim_queue_init (ns );
499679 if (err )
500680 goto err_utn_destroy ;
501681
682+ err = nsim_bpf_init (ns );
683+ if (err )
684+ goto err_rq_destroy ;
685+
502686 nsim_macsec_init (ns );
503687 nsim_ipsec_init (ns );
504688
@@ -512,6 +696,8 @@ static int nsim_init_netdevsim(struct netdevsim *ns)
512696 nsim_ipsec_teardown (ns );
513697 nsim_macsec_teardown (ns );
514698 nsim_bpf_uninit (ns );
699+ err_rq_destroy :
700+ nsim_queue_free (ns );
515701err_utn_destroy :
516702 rtnl_unlock ();
517703 nsim_udp_tunnels_info_destroy (ns -> netdev );
@@ -593,6 +779,7 @@ void nsim_destroy(struct netdevsim *ns)
593779 nsim_macsec_teardown (ns );
594780 nsim_ipsec_teardown (ns );
595781 nsim_bpf_uninit (ns );
782+ nsim_queue_free (ns );
596783 }
597784 rtnl_unlock ();
598785 if (nsim_dev_port_is_pf (ns -> nsim_dev_port ))
0 commit comments