@@ -85,6 +85,7 @@ static void tcf_mirred_release(struct tc_action *a)
 
 static const struct nla_policy mirred_policy[TCA_MIRRED_MAX + 1] = {
 	[TCA_MIRRED_PARMS]	= { .len = sizeof(struct tc_mirred) },
+	[TCA_MIRRED_BLOCKID]	= NLA_POLICY_MIN(NLA_U32, 1),
 };
 
 static struct tc_action_ops act_mirred_ops;
@@ -136,6 +137,17 @@ static int tcf_mirred_init(struct net *net, struct nlattr *nla,
 	if (exists && bind)
 		return 0;
 
+	if (tb[TCA_MIRRED_BLOCKID] && parm->ifindex) {
+		NL_SET_ERR_MSG_MOD(extack,
+				   "Cannot specify Block ID and dev simultaneously");
+		if (exists)
+			tcf_idr_release(*a, bind);
+		else
+			tcf_idr_cleanup(tn, index);
+
+		return -EINVAL;
+	}
+
 	switch (parm->eaction) {
 	case TCA_EGRESS_MIRROR:
 	case TCA_EGRESS_REDIR:
@@ -152,9 +164,10 @@ static int tcf_mirred_init(struct net *net, struct nlattr *nla,
 	}
 
 	if (!exists) {
-		if (!parm->ifindex) {
+		if (!parm->ifindex && !tb[TCA_MIRRED_BLOCKID]) {
 			tcf_idr_cleanup(tn, index);
-			NL_SET_ERR_MSG_MOD(extack, "Specified device does not exist");
+			NL_SET_ERR_MSG_MOD(extack,
+					   "Must specify device or block");
 			return -EINVAL;
 		}
 		ret = tcf_idr_create_from_flags(tn, index, est, a,
@@ -192,6 +205,11 @@ static int tcf_mirred_init(struct net *net, struct nlattr *nla,
 		tcf_mirred_replace_dev(m, ndev);
 		netdev_tracker_alloc(ndev, &m->tcfm_dev_tracker, GFP_ATOMIC);
 		m->tcfm_mac_header_xmit = mac_header_xmit;
+		m->tcfm_blockid = 0;
+	} else if (tb[TCA_MIRRED_BLOCKID]) {
+		tcf_mirred_replace_dev(m, NULL);
+		m->tcfm_mac_header_xmit = false;
+		m->tcfm_blockid = nla_get_u32(tb[TCA_MIRRED_BLOCKID]);
 	}
 	goto_ch = tcf_action_set_ctrlact(*a, parm->action, goto_ch);
 	m->tcfm_eaction = parm->eaction;
@@ -316,6 +334,89 @@ static int tcf_mirred_to_dev(struct sk_buff *skb, struct tcf_mirred *m,
 	return retval;
 }
 
+static int tcf_blockcast_redir(struct sk_buff *skb, struct tcf_mirred *m,
+			       struct tcf_block *block, int m_eaction,
+			       const u32 exception_ifindex, int retval)
+{
+	struct net_device *dev_prev = NULL;
+	struct net_device *dev = NULL;
+	unsigned long index;
+	int mirred_eaction;
+
+	mirred_eaction = tcf_mirred_act_wants_ingress(m_eaction) ?
+		TCA_INGRESS_MIRROR : TCA_EGRESS_MIRROR;
+
+	xa_for_each(&block->ports, index, dev) {
+		if (index == exception_ifindex)
+			continue;
+
+		if (!dev_prev)
+			goto assign_prev;
+
+		tcf_mirred_to_dev(skb, m, dev_prev,
+				  dev_is_mac_header_xmit(dev),
+				  mirred_eaction, retval);
+assign_prev:
+		dev_prev = dev;
+	}
+
+	if (dev_prev)
+		return tcf_mirred_to_dev(skb, m, dev_prev,
+					 dev_is_mac_header_xmit(dev_prev),
+					 m_eaction, retval);
+
+	return retval;
+}
+
+static int tcf_blockcast_mirror(struct sk_buff *skb, struct tcf_mirred *m,
+				struct tcf_block *block, int m_eaction,
+				const u32 exception_ifindex, int retval)
+{
+	struct net_device *dev = NULL;
+	unsigned long index;
+
+	xa_for_each(&block->ports, index, dev) {
+		if (index == exception_ifindex)
+			continue;
+
+		tcf_mirred_to_dev(skb, m, dev,
+				  dev_is_mac_header_xmit(dev),
+				  m_eaction, retval);
+	}
+
+	return retval;
+}
+
+static int tcf_blockcast(struct sk_buff *skb, struct tcf_mirred *m,
+			 const u32 blockid, struct tcf_result *res,
+			 int retval)
+{
+	const u32 exception_ifindex = skb->dev->ifindex;
+	struct tcf_block *block;
+	bool is_redirect;
+	int m_eaction;
+
+	m_eaction = READ_ONCE(m->tcfm_eaction);
+	is_redirect = tcf_mirred_is_act_redirect(m_eaction);
+
+	/* we are already under rcu protection, so can call block lookup
+	 * directly.
+	 */
+	block = tcf_block_lookup(dev_net(skb->dev), blockid);
+	if (!block || xa_empty(&block->ports)) {
+		tcf_action_inc_overlimit_qstats(&m->common);
+		return retval;
+	}
+
+	if (is_redirect)
+		return tcf_blockcast_redir(skb, m, block, m_eaction,
+					   exception_ifindex, retval);
+
+	/* If it's not redirect, it is mirror */
+	return tcf_blockcast_mirror(skb, m, block, m_eaction, exception_ifindex,
+				    retval);
+}
+
 TC_INDIRECT_SCOPE int tcf_mirred_act(struct sk_buff *skb,
				     const struct tc_action *a,
				     struct tcf_result *res)
@@ -326,6 +427,7 @@ TC_INDIRECT_SCOPE int tcf_mirred_act(struct sk_buff *skb,
 	bool m_mac_header_xmit;
 	struct net_device *dev;
 	int m_eaction;
+	u32 blockid;
 
 	nest_level = __this_cpu_inc_return(mirred_nest_level);
 	if (unlikely(nest_level > MIRRED_NEST_LIMIT)) {
@@ -338,6 +440,12 @@ TC_INDIRECT_SCOPE int tcf_mirred_act(struct sk_buff *skb,
 	tcf_lastuse_update(&m->tcf_tm);
 	tcf_action_update_bstats(&m->common, skb);
 
+	blockid = READ_ONCE(m->tcfm_blockid);
+	if (blockid) {
+		retval = tcf_blockcast(skb, m, blockid, res, retval);
+		goto dec_nest_level;
+	}
+
 	dev = rcu_dereference_bh(m->tcfm_dev);
 	if (unlikely(!dev)) {
 		pr_notice_once("tc mirred: target device is gone\n");
@@ -379,6 +487,7 @@ static int tcf_mirred_dump(struct sk_buff *skb, struct tc_action *a, int bind,
 	};
 	struct net_device *dev;
 	struct tcf_t t;
+	u32 blockid;
 
 	spin_lock_bh(&m->tcf_lock);
 	opt.action = m->tcf_action;
@@ -390,6 +499,10 @@ static int tcf_mirred_dump(struct sk_buff *skb, struct tc_action *a, int bind,
 	if (nla_put(skb, TCA_MIRRED_PARMS, sizeof(opt), &opt))
 		goto nla_put_failure;
 
+	blockid = m->tcfm_blockid;
+	if (blockid && nla_put_u32(skb, TCA_MIRRED_BLOCKID, blockid))
+		goto nla_put_failure;
+
 	tcf_tm_dump(&t, &m->tcf_tm);
 	if (nla_put_64bit(skb, TCA_MIRRED_TM, sizeof(t), &t, TCA_MIRRED_PAD))
 		goto nla_put_failure;
@@ -420,6 +533,8 @@ static int mirred_device_event(struct notifier_block *unused,
				 * net_device are already rcu protected.
				 */
				RCU_INIT_POINTER(m->tcfm_dev, NULL);
+			} else if (m->tcfm_blockid) {
+				m->tcfm_blockid = 0;
			}
			spin_unlock_bh(&m->tcf_lock);
		}
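
The one non-obvious piece of the diff above is the deferred-send loop in tcf_blockcast_redir(): every port on the block except the ingress port receives a mirrored clone, and only the last port visited gets the original skb with the real redirect action, so the broadcast costs one clone fewer than a naive loop. Below is a minimal, self-contained C sketch of that iteration pattern; the types and names (pkt, send_copy, send_original, blockcast_redir) are hypothetical stand-ins for illustration only, with a plain array playing the role of the block's port xarray. This is not kernel code.

#include <stdio.h>

/* Toy stand-ins: "pkt" plays the skb, ports[] plays the block's port
 * xarray. None of these names are kernel API.
 */
struct pkt { int id; };

static void send_copy(struct pkt *p, int port)
{
	printf("mirror a copy of pkt %d to port %d\n", p->id, port);
}

static void send_original(struct pkt *p, int port)
{
	printf("redirect the original pkt %d to port %d\n", p->id, port);
}

/* Deferred-send loop, modelled on the dev_prev/assign_prev pattern in
 * tcf_blockcast_redir(): once we know another port follows, the previous
 * port gets a copy; whichever port is seen last gets the original packet.
 */
static void blockcast_redir(struct pkt *p, const int *ports, int n_ports,
			    int exception_port)
{
	int prev = -1;

	for (int i = 0; i < n_ports; i++) {
		if (ports[i] == exception_port)
			continue;		/* skip the ingress port */

		if (prev >= 0)
			send_copy(p, prev);	/* not the last port: mirror */
		prev = ports[i];
	}

	if (prev >= 0)
		send_original(p, prev);		/* last port: redirect */
}

int main(void)
{
	struct pkt p = { .id = 1 };
	int ports[] = { 10, 11, 12, 13 };

	/* The packet arrived on port 11, so it is excluded from the cast. */
	blockcast_redir(&p, ports, 4, 11);
	return 0;
}

The mirror case needs no such trick: tcf_blockcast_mirror() simply sends a copy to every port except the ingress one.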