v6.13.7
  1// SPDX-License-Identifier: GPL-2.0-or-later
  2/*
  3 * net/sched/act_mirred.c	packet mirroring and redirect actions
  4 *
  5 * Authors:	Jamal Hadi Salim (2002-4)
  6 *
  7 * TODO: Add ingress support (and socket redirect support)
  8 */
  9
 10#include <linux/types.h>
 11#include <linux/kernel.h>
 12#include <linux/string.h>
 13#include <linux/errno.h>
 14#include <linux/skbuff.h>
 15#include <linux/rtnetlink.h>
 16#include <linux/module.h>
 17#include <linux/init.h>
 18#include <linux/gfp.h>
 19#include <linux/if_arp.h>
 20#include <net/net_namespace.h>
 21#include <net/netlink.h>
 22#include <net/dst.h>
 23#include <net/pkt_sched.h>
 24#include <net/pkt_cls.h>
 25#include <linux/tc_act/tc_mirred.h>
 26#include <net/tc_act/tc_mirred.h>
 27#include <net/tc_wrapper.h>
 28
 29static LIST_HEAD(mirred_list);
 30static DEFINE_SPINLOCK(mirred_list_lock);
 31
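/* Mirred actions can re-enter the TC datapath, e.g. a filter on veth0
 * redirecting to veth1 while a filter on veth1 redirects back to veth0.
 * A per-CPU nesting counter, incremented on entry to tcf_mirred_act(),
 * cuts such loops off: once the depth exceeds MIRRED_NEST_LIMIT the
 * packet is dropped with TC_ACT_SHOT.
 */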
 32#define MIRRED_NEST_LIMIT    4
 33static DEFINE_PER_CPU(unsigned int, mirred_nest_level);
 34
 35static bool tcf_mirred_is_act_redirect(int action)
 36{
 37	return action == TCA_EGRESS_REDIR || action == TCA_INGRESS_REDIR;
 38}
 39
 40static bool tcf_mirred_act_wants_ingress(int action)
 41{
 42	switch (action) {
 43	case TCA_EGRESS_REDIR:
 44	case TCA_EGRESS_MIRROR:
 45		return false;
 46	case TCA_INGRESS_REDIR:
 47	case TCA_INGRESS_MIRROR:
 48		return true;
 49	default:
 50		BUG();
 51	}
 52}
 53
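/* A redirect at ingress may hand the original skb back to the caller
 * instead of cloning it, but only for verdicts where the caller no
 * longer expects to own the packet; see the dont_clone logic in
 * tcf_mirred_to_dev().
 */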
 54static bool tcf_mirred_can_reinsert(int action)
 55{
 56	switch (action) {
 57	case TC_ACT_SHOT:
 58	case TC_ACT_STOLEN:
 59	case TC_ACT_QUEUED:
 60	case TC_ACT_TRAP:
 61		return true;
 62	}
 63	return false;
 64}
 65
 66static struct net_device *tcf_mirred_dev_dereference(struct tcf_mirred *m)
 67{
 68	return rcu_dereference_protected(m->tcfm_dev,
 69					 lockdep_is_held(&m->tcf_lock));
 70}
 71
 72static void tcf_mirred_release(struct tc_action *a)
 73{
 74	struct tcf_mirred *m = to_mirred(a);
 75	struct net_device *dev;
 76
 77	spin_lock(&mirred_list_lock);
 78	list_del(&m->tcfm_list);
 79	spin_unlock(&mirred_list_lock);
 80
 81	/* last reference to action, no need to lock */
 82	dev = rcu_dereference_protected(m->tcfm_dev, 1);
 83	netdev_put(dev, &m->tcfm_dev_tracker);
 84}
 85
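/* TCA_MIRRED_BLOCKID is restricted to non-zero values so that a zero
 * tcfm_blockid can safely mean "no block configured"; tcf_mirred_act()
 * tests the field for truth before taking the blockcast path.
 */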
 86static const struct nla_policy mirred_policy[TCA_MIRRED_MAX + 1] = {
 87	[TCA_MIRRED_PARMS]	= { .len = sizeof(struct tc_mirred) },
 88	[TCA_MIRRED_BLOCKID]	= NLA_POLICY_MIN(NLA_U32, 1),
 89};
 90
 91static struct tc_action_ops act_mirred_ops;
 92
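/* Swap the target device under tcf_lock: publish the new pointer via
 * RCU and release the tracked reference on the old device (netdev_put()
 * accepts NULL, so this also covers the "no previous device" case).
 */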
 93static void tcf_mirred_replace_dev(struct tcf_mirred *m,
 94				   struct net_device *ndev)
 95{
 96	struct net_device *odev;
 97
 98	odev = rcu_replace_pointer(m->tcfm_dev, ndev,
 99				   lockdep_is_held(&m->tcf_lock));
100	netdev_put(odev, &m->tcfm_dev_tracker);
101}
102
103static int tcf_mirred_init(struct net *net, struct nlattr *nla,
104			   struct nlattr *est, struct tc_action **a,
105			   struct tcf_proto *tp,
106			   u32 flags, struct netlink_ext_ack *extack)
107{
108	struct tc_action_net *tn = net_generic(net, act_mirred_ops.net_id);
109	bool bind = flags & TCA_ACT_FLAGS_BIND;
110	struct nlattr *tb[TCA_MIRRED_MAX + 1];
111	struct tcf_chain *goto_ch = NULL;
112	bool mac_header_xmit = false;
113	struct tc_mirred *parm;
114	struct tcf_mirred *m;
115	bool exists = false;
116	int ret, err;
117	u32 index;
118
119	if (!nla) {
120		NL_SET_ERR_MSG_MOD(extack, "Mirred requires attributes to be passed");
121		return -EINVAL;
122	}
123	ret = nla_parse_nested_deprecated(tb, TCA_MIRRED_MAX, nla,
124					  mirred_policy, extack);
125	if (ret < 0)
126		return ret;
127	if (!tb[TCA_MIRRED_PARMS]) {
128		NL_SET_ERR_MSG_MOD(extack, "Missing required mirred parameters");
129		return -EINVAL;
130	}
131	parm = nla_data(tb[TCA_MIRRED_PARMS]);
132	index = parm->index;
133	err = tcf_idr_check_alloc(tn, &index, a, bind);
134	if (err < 0)
135		return err;
136	exists = err;
137	if (exists && bind)
138		return ACT_P_BOUND;
139
140	if (tb[TCA_MIRRED_BLOCKID] && parm->ifindex) {
141		NL_SET_ERR_MSG_MOD(extack,
142				   "Cannot specify Block ID and dev simultaneously");
143		if (exists)
144			tcf_idr_release(*a, bind);
145		else
146			tcf_idr_cleanup(tn, index);
147
148		return -EINVAL;
149	}
150
151	switch (parm->eaction) {
152	case TCA_EGRESS_MIRROR:
153	case TCA_EGRESS_REDIR:
154	case TCA_INGRESS_REDIR:
155	case TCA_INGRESS_MIRROR:
156		break;
157	default:
158		if (exists)
159			tcf_idr_release(*a, bind);
160		else
161			tcf_idr_cleanup(tn, index);
162		NL_SET_ERR_MSG_MOD(extack, "Unknown mirred option");
163		return -EINVAL;
164	}
165
166	if (!exists) {
167		if (!parm->ifindex && !tb[TCA_MIRRED_BLOCKID]) {
168			tcf_idr_cleanup(tn, index);
169			NL_SET_ERR_MSG_MOD(extack,
170					   "Must specify device or block");
171			return -EINVAL;
172		}
173		ret = tcf_idr_create_from_flags(tn, index, est, a,
174						&act_mirred_ops, bind, flags);
175		if (ret) {
176			tcf_idr_cleanup(tn, index);
177			return ret;
178		}
179		ret = ACT_P_CREATED;
180	} else if (!(flags & TCA_ACT_FLAGS_REPLACE)) {
181		tcf_idr_release(*a, bind);
182		return -EEXIST;
183	}
184
185	m = to_mirred(*a);
186	if (ret == ACT_P_CREATED)
187		INIT_LIST_HEAD(&m->tcfm_list);
188
189	err = tcf_action_check_ctrlact(parm->action, tp, &goto_ch, extack);
190	if (err < 0)
191		goto release_idr;
192
193	spin_lock_bh(&m->tcf_lock);
194
195	if (parm->ifindex) {
196		struct net_device *ndev;
197
198		ndev = dev_get_by_index(net, parm->ifindex);
199		if (!ndev) {
200			spin_unlock_bh(&m->tcf_lock);
201			err = -ENODEV;
202			goto put_chain;
203		}
204		mac_header_xmit = dev_is_mac_header_xmit(ndev);
205		tcf_mirred_replace_dev(m, ndev);
206		netdev_tracker_alloc(ndev, &m->tcfm_dev_tracker, GFP_ATOMIC);
207		m->tcfm_mac_header_xmit = mac_header_xmit;
208		m->tcfm_blockid = 0;
209	} else if (tb[TCA_MIRRED_BLOCKID]) {
210		tcf_mirred_replace_dev(m, NULL);
211		m->tcfm_mac_header_xmit = false;
212		m->tcfm_blockid = nla_get_u32(tb[TCA_MIRRED_BLOCKID]);
213	}
214	goto_ch = tcf_action_set_ctrlact(*a, parm->action, goto_ch);
215	m->tcfm_eaction = parm->eaction;
216	spin_unlock_bh(&m->tcf_lock);
217	if (goto_ch)
218		tcf_chain_put_by_act(goto_ch);
219
220	if (ret == ACT_P_CREATED) {
221		spin_lock(&mirred_list_lock);
222		list_add(&m->tcfm_list, &mirred_list);
223		spin_unlock(&mirred_list_lock);
224	}
225
226	return ret;
227put_chain:
228	if (goto_ch)
229		tcf_chain_put_by_act(goto_ch);
230release_idr:
231	tcf_idr_release(*a, bind);
232	return err;
233}
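/* For illustration (not part of the kernel source): a mirred action is
 * typically installed from userspace with tc, e.g.
 *
 *   tc qdisc add dev eth0 clsact
 *   tc filter add dev eth0 ingress matchall \
 *       action mirred egress redirect dev eth1
 *
 * which reaches tcf_mirred_init() with parm->eaction == TCA_EGRESS_REDIR
 * and parm->ifindex set to eth1's ifindex.
 */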
234
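/* Deliver the skb on the path the action asked for: egress targets go
 * through dev_queue_xmit(); ingress targets are re-received, either via
 * netif_rx() when called from an egress hook or directly via
 * netif_receive_skb() when already at ingress.
 */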
235static int
236tcf_mirred_forward(bool at_ingress, bool want_ingress, struct sk_buff *skb)
237{
238	int err;
239
240	if (!want_ingress)
241		err = tcf_dev_queue_xmit(skb, dev_queue_xmit);
242	else if (!at_ingress)
243		err = netif_rx(skb);
244	else
245		err = netif_receive_skb(skb);
246
247	return err;
248}
249
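/* Send one packet to one target device: clone unless the original can
 * be handed over (redirect at ingress with a consuming verdict), align
 * skb->data with what the target expects (network vs. MAC header), and
 * account a drop on failure. Returns the action verdict for the caller.
 */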
250static int tcf_mirred_to_dev(struct sk_buff *skb, struct tcf_mirred *m,
251			     struct net_device *dev,
252			     const bool m_mac_header_xmit, int m_eaction,
253			     int retval)
254{
255	struct sk_buff *skb_to_send = skb;
256	bool want_ingress;
257	bool is_redirect;
258	bool expects_nh;
259	bool at_ingress;
260	bool dont_clone;
261	int mac_len;
262	bool at_nh;
263	int err;
264
265	is_redirect = tcf_mirred_is_act_redirect(m_eaction);
266	if (unlikely(!(dev->flags & IFF_UP)) || !netif_carrier_ok(dev)) {
267		net_notice_ratelimited("tc mirred to Houston: device %s is down\n",
268				       dev->name);
269		goto err_cant_do;
270	}
271
272	/* The clone could be avoided only when called from ingress or clsact;
273	 * since a clsact caller can't easily be detected, skip the clone only
274	 * for ingress - that covers the TC S/W datapath.
275	 */
276	at_ingress = skb_at_tc_ingress(skb);
277	dont_clone = skb_at_tc_ingress(skb) && is_redirect &&
278		tcf_mirred_can_reinsert(retval);
279	if (!dont_clone) {
280		skb_to_send = skb_clone(skb, GFP_ATOMIC);
281		if (!skb_to_send)
282			goto err_cant_do;
283	}
284
285	want_ingress = tcf_mirred_act_wants_ingress(m_eaction);
286
287	/* All mirred/redirected skbs should clear previous ct info */
288	nf_reset_ct(skb_to_send);
289	if (want_ingress && !at_ingress) /* drop dst for egress -> ingress */
290		skb_dst_drop(skb_to_send);
291
292	expects_nh = want_ingress || !m_mac_header_xmit;
293	at_nh = skb->data == skb_network_header(skb);
294	if (at_nh != expects_nh) {
295		mac_len = at_ingress ? skb->mac_len :
296			  skb_network_offset(skb);
297		if (expects_nh) {
298			/* target device/action expect data at nh */
299			skb_pull_rcsum(skb_to_send, mac_len);
300		} else {
301			/* target device/action expect data at mac */
302			skb_push_rcsum(skb_to_send, mac_len);
303		}
304	}
305
306	skb_to_send->skb_iif = skb->dev->ifindex;
307	skb_to_send->dev = dev;
308
309	if (is_redirect) {
310		if (skb == skb_to_send)
311			retval = TC_ACT_CONSUMED;
312
313		skb_set_redirected(skb_to_send, skb_to_send->tc_at_ingress);
314
315		err = tcf_mirred_forward(at_ingress, want_ingress, skb_to_send);
316	} else {
317		err = tcf_mirred_forward(at_ingress, want_ingress, skb_to_send);
318	}
319	if (err)
320		tcf_action_inc_overlimit_qstats(&m->common);
321
322	return retval;
323
324err_cant_do:
325	if (is_redirect)
326		retval = TC_ACT_SHOT;
327	tcf_action_inc_overlimit_qstats(&m->common);
328	return retval;
329}
330
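/* Redirect to a TC block: every member port except the arriving one
 * gets a mirrored copy, and the final port receives the real redirect,
 * so the original skb is consumed exactly once. The one-element delay
 * through dev_prev is what lets the last device be treated differently.
 */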
331static int tcf_blockcast_redir(struct sk_buff *skb, struct tcf_mirred *m,
332			       struct tcf_block *block, int m_eaction,
333			       const u32 exception_ifindex, int retval)
334{
335	struct net_device *dev_prev = NULL;
336	struct net_device *dev = NULL;
337	unsigned long index;
338	int mirred_eaction;
339
340	mirred_eaction = tcf_mirred_act_wants_ingress(m_eaction) ?
341		TCA_INGRESS_MIRROR : TCA_EGRESS_MIRROR;
342
343	xa_for_each(&block->ports, index, dev) {
344		if (index == exception_ifindex)
345			continue;
346
347		if (!dev_prev)
348			goto assign_prev;
349
350		tcf_mirred_to_dev(skb, m, dev_prev,
351				  dev_is_mac_header_xmit(dev),
352				  mirred_eaction, retval);
353assign_prev:
354		dev_prev = dev;
355	}
356
357	if (dev_prev)
358		return tcf_mirred_to_dev(skb, m, dev_prev,
359					 dev_is_mac_header_xmit(dev_prev),
360					 m_eaction, retval);
361
362	return retval;
363}
364
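/* Mirror to a TC block: send a clone to every member port except the
 * one the packet arrived on.
 */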
365static int tcf_blockcast_mirror(struct sk_buff *skb, struct tcf_mirred *m,
366				struct tcf_block *block, int m_eaction,
367				const u32 exception_ifindex, int retval)
368{
369	struct net_device *dev = NULL;
370	unsigned long index;
371
372	xa_for_each(&block->ports, index, dev) {
373		if (index == exception_ifindex)
374			continue;
375
376		tcf_mirred_to_dev(skb, m, dev,
377				  dev_is_mac_header_xmit(dev),
378				  m_eaction, retval);
379	}
380
381	return retval;
382}
383
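/* Entry point for the TCA_MIRRED_BLOCKID mode: look up the block (the
 * caller already holds RCU), skip empty blocks, and dispatch to the
 * redirect or mirror variant above.
 */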
384static int tcf_blockcast(struct sk_buff *skb, struct tcf_mirred *m,
385			 const u32 blockid, struct tcf_result *res,
386			 int retval)
387{
388	const u32 exception_ifindex = skb->dev->ifindex;
389	struct tcf_block *block;
390	bool is_redirect;
391	int m_eaction;
392
393	m_eaction = READ_ONCE(m->tcfm_eaction);
394	is_redirect = tcf_mirred_is_act_redirect(m_eaction);
395
396	/* we are already under rcu protection, so can call block lookup
397	 * directly.
398	 */
399	block = tcf_block_lookup(dev_net(skb->dev), blockid);
400	if (!block || xa_empty(&block->ports)) {
401		tcf_action_inc_overlimit_qstats(&m->common);
402		return retval;
403	}
404
405	if (is_redirect)
406		return tcf_blockcast_redir(skb, m, block, m_eaction,
407					   exception_ifindex, retval);
408
409	/* If it's not a redirect, it is a mirror */
410	return tcf_blockcast_mirror(skb, m, block, m_eaction, exception_ifindex,
411				    retval);
412}
413
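/* Datapath entry point, wired up via act_mirred_ops.act. The per-CPU
 * nest level is raised before any work is done so that recursive
 * invocations (mirred loops) are detected and shot down early.
 */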
414TC_INDIRECT_SCOPE int tcf_mirred_act(struct sk_buff *skb,
415				     const struct tc_action *a,
416				     struct tcf_result *res)
417{
418	struct tcf_mirred *m = to_mirred(a);
419	int retval = READ_ONCE(m->tcf_action);
420	unsigned int nest_level;
421	bool m_mac_header_xmit;
422	struct net_device *dev;
423	int m_eaction;
424	u32 blockid;
425
426	nest_level = __this_cpu_inc_return(mirred_nest_level);
427	if (unlikely(nest_level > MIRRED_NEST_LIMIT)) {
428		net_warn_ratelimited("Packet exceeded mirred recursion limit on dev %s\n",
429				     netdev_name(skb->dev));
430		retval = TC_ACT_SHOT;
431		goto dec_nest_level;
432	}
433
434	tcf_lastuse_update(&m->tcf_tm);
435	tcf_action_update_bstats(&m->common, skb);
436
437	blockid = READ_ONCE(m->tcfm_blockid);
438	if (blockid) {
439		retval = tcf_blockcast(skb, m, blockid, res, retval);
440		goto dec_nest_level;
441	}
442
443	dev = rcu_dereference_bh(m->tcfm_dev);
444	if (unlikely(!dev)) {
445		pr_notice_once("tc mirred: target device is gone\n");
446		tcf_action_inc_overlimit_qstats(&m->common);
447		goto dec_nest_level;
448	}
449
450	m_mac_header_xmit = READ_ONCE(m->tcfm_mac_header_xmit);
451	m_eaction = READ_ONCE(m->tcfm_eaction);
452
453	retval = tcf_mirred_to_dev(skb, m, dev, m_mac_header_xmit, m_eaction,
454				   retval);
455
456dec_nest_level:
457	__this_cpu_dec(mirred_nest_level);
458
459	return retval;
460}
461
462static void tcf_stats_update(struct tc_action *a, u64 bytes, u64 packets,
463			     u64 drops, u64 lastuse, bool hw)
464{
465	struct tcf_mirred *m = to_mirred(a);
466	struct tcf_t *tm = &m->tcf_tm;
467
468	tcf_action_update_stats(a, bytes, packets, drops, hw);
469	tm->lastuse = max_t(u64, tm->lastuse, lastuse);
470}
471
472static int tcf_mirred_dump(struct sk_buff *skb, struct tc_action *a, int bind,
473			   int ref)
474{
475	unsigned char *b = skb_tail_pointer(skb);
476	struct tcf_mirred *m = to_mirred(a);
477	struct tc_mirred opt = {
478		.index   = m->tcf_index,
479		.refcnt  = refcount_read(&m->tcf_refcnt) - ref,
480		.bindcnt = atomic_read(&m->tcf_bindcnt) - bind,
481	};
482	struct net_device *dev;
483	struct tcf_t t;
484	u32 blockid;
485
486	spin_lock_bh(&m->tcf_lock);
487	opt.action = m->tcf_action;
488	opt.eaction = m->tcfm_eaction;
489	dev = tcf_mirred_dev_dereference(m);
490	if (dev)
491		opt.ifindex = dev->ifindex;
492
493	if (nla_put(skb, TCA_MIRRED_PARMS, sizeof(opt), &opt))
494		goto nla_put_failure;
495
496	blockid = m->tcfm_blockid;
497	if (blockid && nla_put_u32(skb, TCA_MIRRED_BLOCKID, blockid))
498		goto nla_put_failure;
499
500	tcf_tm_dump(&t, &m->tcf_tm);
501	if (nla_put_64bit(skb, TCA_MIRRED_TM, sizeof(t), &t, TCA_MIRRED_PAD))
502		goto nla_put_failure;
503	spin_unlock_bh(&m->tcf_lock);
504
505	return skb->len;
506
507nla_put_failure:
508	spin_unlock_bh(&m->tcf_lock);
509	nlmsg_trim(skb, b);
510	return -1;
511}
512
513static int mirred_device_event(struct notifier_block *unused,
514			       unsigned long event, void *ptr)
515{
516	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
517	struct tcf_mirred *m;
518
519	ASSERT_RTNL();
520	if (event == NETDEV_UNREGISTER) {
521		spin_lock(&mirred_list_lock);
522		list_for_each_entry(m, &mirred_list, tcfm_list) {
523			spin_lock_bh(&m->tcf_lock);
524			if (tcf_mirred_dev_dereference(m) == dev) {
525				netdev_put(dev, &m->tcfm_dev_tracker);
526				/* Note: no RCU grace period is necessary, as
527				 * net_device is already RCU protected.
528				 */
529				RCU_INIT_POINTER(m->tcfm_dev, NULL);
530			}
531			spin_unlock_bh(&m->tcf_lock);
532		}
533		spin_unlock(&mirred_list_lock);
534	}
535
536	return NOTIFY_DONE;
537}
538
539static struct notifier_block mirred_device_notifier = {
540	.notifier_call = mirred_device_event,
541};
542
543static void tcf_mirred_dev_put(void *priv)
544{
545	struct net_device *dev = priv;
546
547	dev_put(dev);
548}
549
550static struct net_device *
551tcf_mirred_get_dev(const struct tc_action *a,
552		   tc_action_priv_destructor *destructor)
553{
554	struct tcf_mirred *m = to_mirred(a);
555	struct net_device *dev;
556
557	rcu_read_lock();
558	dev = rcu_dereference(m->tcfm_dev);
559	if (dev) {
560		dev_hold(dev);
561		*destructor = tcf_mirred_dev_put;
562	}
563	rcu_read_unlock();
564
565	return dev;
566}
567
568static size_t tcf_mirred_get_fill_size(const struct tc_action *act)
569{
570	return nla_total_size(sizeof(struct tc_mirred));
571}
572
573static void tcf_offload_mirred_get_dev(struct flow_action_entry *entry,
574				       const struct tc_action *act)
575{
576	entry->dev = act->ops->get_dev(act, &entry->destructor);
577	if (!entry->dev)
578		return;
579	entry->destructor_priv = entry->dev;
580}
581
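/* Hardware offload: translate the four mirred variants into their
 * FLOW_ACTION_* counterparts. On the bind path the target net_device is
 * also resolved and wired to a destructor so the driver's reference is
 * dropped when the flow entry goes away.
 */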
582static int tcf_mirred_offload_act_setup(struct tc_action *act, void *entry_data,
583					u32 *index_inc, bool bind,
584					struct netlink_ext_ack *extack)
585{
586	if (bind) {
587		struct flow_action_entry *entry = entry_data;
588
589		if (is_tcf_mirred_egress_redirect(act)) {
590			entry->id = FLOW_ACTION_REDIRECT;
591			tcf_offload_mirred_get_dev(entry, act);
592		} else if (is_tcf_mirred_egress_mirror(act)) {
593			entry->id = FLOW_ACTION_MIRRED;
594			tcf_offload_mirred_get_dev(entry, act);
595		} else if (is_tcf_mirred_ingress_redirect(act)) {
596			entry->id = FLOW_ACTION_REDIRECT_INGRESS;
597			tcf_offload_mirred_get_dev(entry, act);
598		} else if (is_tcf_mirred_ingress_mirror(act)) {
599			entry->id = FLOW_ACTION_MIRRED_INGRESS;
600			tcf_offload_mirred_get_dev(entry, act);
601		} else {
602			NL_SET_ERR_MSG_MOD(extack, "Unsupported mirred offload");
603			return -EOPNOTSUPP;
604		}
605		*index_inc = 1;
606	} else {
607		struct flow_offload_action *fl_action = entry_data;
608
609		if (is_tcf_mirred_egress_redirect(act))
610			fl_action->id = FLOW_ACTION_REDIRECT;
611		else if (is_tcf_mirred_egress_mirror(act))
612			fl_action->id = FLOW_ACTION_MIRRED;
613		else if (is_tcf_mirred_ingress_redirect(act))
614			fl_action->id = FLOW_ACTION_REDIRECT_INGRESS;
615		else if (is_tcf_mirred_ingress_mirror(act))
616			fl_action->id = FLOW_ACTION_MIRRED_INGRESS;
617		else
618			return -EOPNOTSUPP;
619	}
620
621	return 0;
622}
623
624static struct tc_action_ops act_mirred_ops = {
625	.kind		=	"mirred",
626	.id		=	TCA_ID_MIRRED,
627	.owner		=	THIS_MODULE,
628	.act		=	tcf_mirred_act,
629	.stats_update	=	tcf_stats_update,
630	.dump		=	tcf_mirred_dump,
631	.cleanup	=	tcf_mirred_release,
632	.init		=	tcf_mirred_init,
633	.get_fill_size	=	tcf_mirred_get_fill_size,
634	.offload_act_setup =	tcf_mirred_offload_act_setup,
635	.size		=	sizeof(struct tcf_mirred),
636	.get_dev	=	tcf_mirred_get_dev,
637};
638MODULE_ALIAS_NET_ACT("mirred");
639
640static __net_init int mirred_init_net(struct net *net)
641{
642	struct tc_action_net *tn = net_generic(net, act_mirred_ops.net_id);
643
644	return tc_action_net_init(net, tn, &act_mirred_ops);
645}
646
647static void __net_exit mirred_exit_net(struct list_head *net_list)
648{
649	tc_action_net_exit(net_list, act_mirred_ops.net_id);
650}
651
652static struct pernet_operations mirred_net_ops = {
653	.init = mirred_init_net,
654	.exit_batch = mirred_exit_net,
655	.id   = &act_mirred_ops.net_id,
656	.size = sizeof(struct tc_action_net),
657};
658
659MODULE_AUTHOR("Jamal Hadi Salim(2002)");
660MODULE_DESCRIPTION("Device Mirror/redirect actions");
661MODULE_LICENSE("GPL");
662
663static int __init mirred_init_module(void)
664{
665	int err = register_netdevice_notifier(&mirred_device_notifier);
666	if (err)
667		return err;
668
669	pr_info("Mirror/redirect action on\n");
670	err = tcf_register_action(&act_mirred_ops, &mirred_net_ops);
671	if (err)
672		unregister_netdevice_notifier(&mirred_device_notifier);
673
674	return err;
675}
676
677static void __exit mirred_cleanup_module(void)
678{
679	tcf_unregister_action(&act_mirred_ops, &mirred_net_ops);
680	unregister_netdevice_notifier(&mirred_device_notifier);
681}
682
683module_init(mirred_init_module);
684module_exit(mirred_cleanup_module);
v6.2
  1// SPDX-License-Identifier: GPL-2.0-or-later
  2/*
  3 * net/sched/act_mirred.c	packet mirroring and redirect actions
  4 *
  5 * Authors:	Jamal Hadi Salim (2002-4)
  6 *
  7 * TODO: Add ingress support (and socket redirect support)
  8 */
  9
 10#include <linux/types.h>
 11#include <linux/kernel.h>
 12#include <linux/string.h>
 13#include <linux/errno.h>
 14#include <linux/skbuff.h>
 15#include <linux/rtnetlink.h>
 16#include <linux/module.h>
 17#include <linux/init.h>
 18#include <linux/gfp.h>
 19#include <linux/if_arp.h>
 20#include <net/net_namespace.h>
 21#include <net/netlink.h>
 22#include <net/dst.h>
 23#include <net/pkt_sched.h>
 24#include <net/pkt_cls.h>
 25#include <linux/tc_act/tc_mirred.h>
 26#include <net/tc_act/tc_mirred.h>
 27#include <net/tc_wrapper.h>
 28
 29static LIST_HEAD(mirred_list);
 30static DEFINE_SPINLOCK(mirred_list_lock);
 31
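/* Same loop guard as above; in v6.2 it is still called
 * MIRRED_RECURSION_LIMIT (renamed to MIRRED_NEST_LIMIT in later
 * releases, as seen in the v6.13.7 listing).
 */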
 32#define MIRRED_RECURSION_LIMIT    4
 33static DEFINE_PER_CPU(unsigned int, mirred_rec_level);
 34
 35static bool tcf_mirred_is_act_redirect(int action)
 36{
 37	return action == TCA_EGRESS_REDIR || action == TCA_INGRESS_REDIR;
 38}
 39
 40static bool tcf_mirred_act_wants_ingress(int action)
 41{
 42	switch (action) {
 43	case TCA_EGRESS_REDIR:
 44	case TCA_EGRESS_MIRROR:
 45		return false;
 46	case TCA_INGRESS_REDIR:
 47	case TCA_INGRESS_MIRROR:
 48		return true;
 49	default:
 50		BUG();
 51	}
 52}
 53
 54static bool tcf_mirred_can_reinsert(int action)
 55{
 56	switch (action) {
 57	case TC_ACT_SHOT:
 58	case TC_ACT_STOLEN:
 59	case TC_ACT_QUEUED:
 60	case TC_ACT_TRAP:
 61		return true;
 62	}
 63	return false;
 64}
 65
 66static struct net_device *tcf_mirred_dev_dereference(struct tcf_mirred *m)
 67{
 68	return rcu_dereference_protected(m->tcfm_dev,
 69					 lockdep_is_held(&m->tcf_lock));
 70}
 71
 72static void tcf_mirred_release(struct tc_action *a)
 73{
 74	struct tcf_mirred *m = to_mirred(a);
 75	struct net_device *dev;
 76
 77	spin_lock(&mirred_list_lock);
 78	list_del(&m->tcfm_list);
 79	spin_unlock(&mirred_list_lock);
 80
 81	/* last reference to action, no need to lock */
 82	dev = rcu_dereference_protected(m->tcfm_dev, 1);
 83	netdev_put(dev, &m->tcfm_dev_tracker);
 84}
 85
 86static const struct nla_policy mirred_policy[TCA_MIRRED_MAX + 1] = {
 87	[TCA_MIRRED_PARMS]	= { .len = sizeof(struct tc_mirred) },
 88};
 89
 90static struct tc_action_ops act_mirred_ops;
 91
 92static int tcf_mirred_init(struct net *net, struct nlattr *nla,
 93			   struct nlattr *est, struct tc_action **a,
 94			   struct tcf_proto *tp,
 95			   u32 flags, struct netlink_ext_ack *extack)
 96{
 97	struct tc_action_net *tn = net_generic(net, act_mirred_ops.net_id);
 98	bool bind = flags & TCA_ACT_FLAGS_BIND;
 99	struct nlattr *tb[TCA_MIRRED_MAX + 1];
100	struct tcf_chain *goto_ch = NULL;
101	bool mac_header_xmit = false;
102	struct tc_mirred *parm;
103	struct tcf_mirred *m;
104	bool exists = false;
105	int ret, err;
106	u32 index;
107
108	if (!nla) {
109		NL_SET_ERR_MSG_MOD(extack, "Mirred requires attributes to be passed");
110		return -EINVAL;
111	}
112	ret = nla_parse_nested_deprecated(tb, TCA_MIRRED_MAX, nla,
113					  mirred_policy, extack);
114	if (ret < 0)
115		return ret;
116	if (!tb[TCA_MIRRED_PARMS]) {
117		NL_SET_ERR_MSG_MOD(extack, "Missing required mirred parameters");
118		return -EINVAL;
119	}
120	parm = nla_data(tb[TCA_MIRRED_PARMS]);
121	index = parm->index;
122	err = tcf_idr_check_alloc(tn, &index, a, bind);
123	if (err < 0)
124		return err;
125	exists = err;
126	if (exists && bind)
127		return 0;
128
129	switch (parm->eaction) {
130	case TCA_EGRESS_MIRROR:
131	case TCA_EGRESS_REDIR:
132	case TCA_INGRESS_REDIR:
133	case TCA_INGRESS_MIRROR:
134		break;
135	default:
136		if (exists)
137			tcf_idr_release(*a, bind);
138		else
139			tcf_idr_cleanup(tn, index);
140		NL_SET_ERR_MSG_MOD(extack, "Unknown mirred option");
141		return -EINVAL;
142	}
143
144	if (!exists) {
145		if (!parm->ifindex) {
146			tcf_idr_cleanup(tn, index);
147			NL_SET_ERR_MSG_MOD(extack, "Specified device does not exist");
148			return -EINVAL;
149		}
150		ret = tcf_idr_create_from_flags(tn, index, est, a,
151						&act_mirred_ops, bind, flags);
152		if (ret) {
153			tcf_idr_cleanup(tn, index);
154			return ret;
155		}
156		ret = ACT_P_CREATED;
157	} else if (!(flags & TCA_ACT_FLAGS_REPLACE)) {
158		tcf_idr_release(*a, bind);
159		return -EEXIST;
160	}
161
162	m = to_mirred(*a);
163	if (ret == ACT_P_CREATED)
164		INIT_LIST_HEAD(&m->tcfm_list);
165
166	err = tcf_action_check_ctrlact(parm->action, tp, &goto_ch, extack);
167	if (err < 0)
168		goto release_idr;
169
170	spin_lock_bh(&m->tcf_lock);
171
172	if (parm->ifindex) {
173		struct net_device *odev, *ndev;
174
175		ndev = dev_get_by_index(net, parm->ifindex);
176		if (!ndev) {
177			spin_unlock_bh(&m->tcf_lock);
178			err = -ENODEV;
179			goto put_chain;
180		}
181		mac_header_xmit = dev_is_mac_header_xmit(ndev);
182		odev = rcu_replace_pointer(m->tcfm_dev, ndev,
183					  lockdep_is_held(&m->tcf_lock));
184		netdev_put(odev, &m->tcfm_dev_tracker);
185		netdev_tracker_alloc(ndev, &m->tcfm_dev_tracker, GFP_ATOMIC);
186		m->tcfm_mac_header_xmit = mac_header_xmit;
187	}
188	goto_ch = tcf_action_set_ctrlact(*a, parm->action, goto_ch);
189	m->tcfm_eaction = parm->eaction;
190	spin_unlock_bh(&m->tcf_lock);
191	if (goto_ch)
192		tcf_chain_put_by_act(goto_ch);
193
194	if (ret == ACT_P_CREATED) {
195		spin_lock(&mirred_list_lock);
196		list_add(&m->tcfm_list, &mirred_list);
197		spin_unlock(&mirred_list_lock);
198	}
199
200	return ret;
201put_chain:
202	if (goto_ch)
203		tcf_chain_put_by_act(goto_ch);
204release_idr:
205	tcf_idr_release(*a, bind);
206	return err;
207}
208
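/* In v6.2 the forward helper has only two paths: egress via
 * dev_queue_xmit() or ingress via netif_receive_skb(). The netif_rx()
 * branch for egress-to-ingress delivery appears in the newer version
 * above.
 */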
209static int tcf_mirred_forward(bool want_ingress, struct sk_buff *skb)
210{
211	int err;
212
213	if (!want_ingress)
214		err = tcf_dev_queue_xmit(skb, dev_queue_xmit);
215	else
216		err = netif_receive_skb(skb);
217
218	return err;
219}
220
221TC_INDIRECT_SCOPE int tcf_mirred_act(struct sk_buff *skb,
222				     const struct tc_action *a,
223				     struct tcf_result *res)
224{
225	struct tcf_mirred *m = to_mirred(a);
226	struct sk_buff *skb2 = skb;
227	bool m_mac_header_xmit;
228	struct net_device *dev;
229	unsigned int rec_level;
230	int retval, err = 0;
231	bool use_reinsert;
232	bool want_ingress;
233	bool is_redirect;
234	bool expects_nh;
235	bool at_ingress;
236	int m_eaction;
237	int mac_len;
238	bool at_nh;
239
240	rec_level = __this_cpu_inc_return(mirred_rec_level);
241	if (unlikely(rec_level > MIRRED_RECURSION_LIMIT)) {
242		net_warn_ratelimited("Packet exceeded mirred recursion limit on dev %s\n",
243				     netdev_name(skb->dev));
244		__this_cpu_dec(mirred_rec_level);
245		return TC_ACT_SHOT;
246	}
247
248	tcf_lastuse_update(&m->tcf_tm);
249	tcf_action_update_bstats(&m->common, skb);
250
251	m_mac_header_xmit = READ_ONCE(m->tcfm_mac_header_xmit);
252	m_eaction = READ_ONCE(m->tcfm_eaction);
253	retval = READ_ONCE(m->tcf_action);
254	dev = rcu_dereference_bh(m->tcfm_dev);
255	if (unlikely(!dev)) {
256		pr_notice_once("tc mirred: target device is gone\n");
257		goto out;
258	}
259
260	if (unlikely(!(dev->flags & IFF_UP))) {
261		net_notice_ratelimited("tc mirred to Houston: device %s is down\n",
262				       dev->name);
263		goto out;
264	}
265
266	/* The clone could be avoided only when called from ingress or clsact;
267	 * since a clsact caller can't easily be detected, skip the clone only
268	 * for ingress - that covers the TC S/W datapath.
269	 */
270	is_redirect = tcf_mirred_is_act_redirect(m_eaction);
271	at_ingress = skb_at_tc_ingress(skb);
272	use_reinsert = at_ingress && is_redirect &&
273		       tcf_mirred_can_reinsert(retval);
274	if (!use_reinsert) {
275		skb2 = skb_clone(skb, GFP_ATOMIC);
276		if (!skb2)
277			goto out;
278	}
279
280	want_ingress = tcf_mirred_act_wants_ingress(m_eaction);
281
282	/* All mirred/redirected skbs should clear previous ct info */
283	nf_reset_ct(skb2);
284	if (want_ingress && !at_ingress) /* drop dst for egress -> ingress */
285		skb_dst_drop(skb2);
286
287	expects_nh = want_ingress || !m_mac_header_xmit;
288	at_nh = skb->data == skb_network_header(skb);
289	if (at_nh != expects_nh) {
290		mac_len = skb_at_tc_ingress(skb) ? skb->mac_len :
291			  skb_network_header(skb) - skb_mac_header(skb);
292		if (expects_nh) {
293			/* target device/action expect data at nh */
294			skb_pull_rcsum(skb2, mac_len);
295		} else {
296			/* target device/action expect data at mac */
297			skb_push_rcsum(skb2, mac_len);
298		}
299	}
300
301	skb2->skb_iif = skb->dev->ifindex;
302	skb2->dev = dev;
303
304	/* mirror is always swallowed */
305	if (is_redirect) {
306		skb_set_redirected(skb2, skb2->tc_at_ingress);
307
308		/* let the caller reinsert the packet, if possible */
309		if (use_reinsert) {
310			err = tcf_mirred_forward(want_ingress, skb);
311			if (err)
312				tcf_action_inc_overlimit_qstats(&m->common);
313			__this_cpu_dec(mirred_rec_level);
314			return TC_ACT_CONSUMED;
315		}
316	}
317
318	err = tcf_mirred_forward(want_ingress, skb2);
319	if (err) {
320out:
321		tcf_action_inc_overlimit_qstats(&m->common);
322		if (tcf_mirred_is_act_redirect(m_eaction))
323			retval = TC_ACT_SHOT;
324	}
325	__this_cpu_dec(mirred_rec_level);
326
327	return retval;
328}
329
330static void tcf_stats_update(struct tc_action *a, u64 bytes, u64 packets,
331			     u64 drops, u64 lastuse, bool hw)
332{
333	struct tcf_mirred *m = to_mirred(a);
334	struct tcf_t *tm = &m->tcf_tm;
335
336	tcf_action_update_stats(a, bytes, packets, drops, hw);
337	tm->lastuse = max_t(u64, tm->lastuse, lastuse);
338}
339
340static int tcf_mirred_dump(struct sk_buff *skb, struct tc_action *a, int bind,
341			   int ref)
342{
343	unsigned char *b = skb_tail_pointer(skb);
344	struct tcf_mirred *m = to_mirred(a);
345	struct tc_mirred opt = {
346		.index   = m->tcf_index,
347		.refcnt  = refcount_read(&m->tcf_refcnt) - ref,
348		.bindcnt = atomic_read(&m->tcf_bindcnt) - bind,
349	};
350	struct net_device *dev;
351	struct tcf_t t;
352
353	spin_lock_bh(&m->tcf_lock);
354	opt.action = m->tcf_action;
355	opt.eaction = m->tcfm_eaction;
356	dev = tcf_mirred_dev_dereference(m);
357	if (dev)
358		opt.ifindex = dev->ifindex;
359
360	if (nla_put(skb, TCA_MIRRED_PARMS, sizeof(opt), &opt))
361		goto nla_put_failure;
362
363	tcf_tm_dump(&t, &m->tcf_tm);
364	if (nla_put_64bit(skb, TCA_MIRRED_TM, sizeof(t), &t, TCA_MIRRED_PAD))
365		goto nla_put_failure;
366	spin_unlock_bh(&m->tcf_lock);
367
368	return skb->len;
369
370nla_put_failure:
371	spin_unlock_bh(&m->tcf_lock);
372	nlmsg_trim(skb, b);
373	return -1;
374}
375
376static int mirred_device_event(struct notifier_block *unused,
377			       unsigned long event, void *ptr)
378{
379	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
380	struct tcf_mirred *m;
381
382	ASSERT_RTNL();
383	if (event == NETDEV_UNREGISTER) {
384		spin_lock(&mirred_list_lock);
385		list_for_each_entry(m, &mirred_list, tcfm_list) {
386			spin_lock_bh(&m->tcf_lock);
387			if (tcf_mirred_dev_dereference(m) == dev) {
388				netdev_put(dev, &m->tcfm_dev_tracker);
389				/* Note: no RCU grace period is necessary, as
390				 * net_device is already RCU protected.
391				 */
392				RCU_INIT_POINTER(m->tcfm_dev, NULL);
393			}
394			spin_unlock_bh(&m->tcf_lock);
395		}
396		spin_unlock(&mirred_list_lock);
397	}
398
399	return NOTIFY_DONE;
400}
401
402static struct notifier_block mirred_device_notifier = {
403	.notifier_call = mirred_device_event,
404};
405
406static void tcf_mirred_dev_put(void *priv)
407{
408	struct net_device *dev = priv;
409
410	dev_put(dev);
411}
412
413static struct net_device *
414tcf_mirred_get_dev(const struct tc_action *a,
415		   tc_action_priv_destructor *destructor)
416{
417	struct tcf_mirred *m = to_mirred(a);
418	struct net_device *dev;
419
420	rcu_read_lock();
421	dev = rcu_dereference(m->tcfm_dev);
422	if (dev) {
423		dev_hold(dev);
424		*destructor = tcf_mirred_dev_put;
425	}
426	rcu_read_unlock();
427
428	return dev;
429}
430
431static size_t tcf_mirred_get_fill_size(const struct tc_action *act)
432{
433	return nla_total_size(sizeof(struct tc_mirred));
434}
435
436static void tcf_offload_mirred_get_dev(struct flow_action_entry *entry,
437				       const struct tc_action *act)
438{
439	entry->dev = act->ops->get_dev(act, &entry->destructor);
440	if (!entry->dev)
441		return;
442	entry->destructor_priv = entry->dev;
443}
444
445static int tcf_mirred_offload_act_setup(struct tc_action *act, void *entry_data,
446					u32 *index_inc, bool bind,
447					struct netlink_ext_ack *extack)
448{
449	if (bind) {
450		struct flow_action_entry *entry = entry_data;
451
452		if (is_tcf_mirred_egress_redirect(act)) {
453			entry->id = FLOW_ACTION_REDIRECT;
454			tcf_offload_mirred_get_dev(entry, act);
455		} else if (is_tcf_mirred_egress_mirror(act)) {
456			entry->id = FLOW_ACTION_MIRRED;
457			tcf_offload_mirred_get_dev(entry, act);
458		} else if (is_tcf_mirred_ingress_redirect(act)) {
459			entry->id = FLOW_ACTION_REDIRECT_INGRESS;
460			tcf_offload_mirred_get_dev(entry, act);
461		} else if (is_tcf_mirred_ingress_mirror(act)) {
462			entry->id = FLOW_ACTION_MIRRED_INGRESS;
463			tcf_offload_mirred_get_dev(entry, act);
464		} else {
465			NL_SET_ERR_MSG_MOD(extack, "Unsupported mirred offload");
466			return -EOPNOTSUPP;
467		}
468		*index_inc = 1;
469	} else {
470		struct flow_offload_action *fl_action = entry_data;
471
472		if (is_tcf_mirred_egress_redirect(act))
473			fl_action->id = FLOW_ACTION_REDIRECT;
474		else if (is_tcf_mirred_egress_mirror(act))
475			fl_action->id = FLOW_ACTION_MIRRED;
476		else if (is_tcf_mirred_ingress_redirect(act))
477			fl_action->id = FLOW_ACTION_REDIRECT_INGRESS;
478		else if (is_tcf_mirred_ingress_mirror(act))
479			fl_action->id = FLOW_ACTION_MIRRED_INGRESS;
480		else
481			return -EOPNOTSUPP;
482	}
483
484	return 0;
485}
486
487static struct tc_action_ops act_mirred_ops = {
488	.kind		=	"mirred",
489	.id		=	TCA_ID_MIRRED,
490	.owner		=	THIS_MODULE,
491	.act		=	tcf_mirred_act,
492	.stats_update	=	tcf_stats_update,
493	.dump		=	tcf_mirred_dump,
494	.cleanup	=	tcf_mirred_release,
495	.init		=	tcf_mirred_init,
496	.get_fill_size	=	tcf_mirred_get_fill_size,
497	.offload_act_setup =	tcf_mirred_offload_act_setup,
498	.size		=	sizeof(struct tcf_mirred),
499	.get_dev	=	tcf_mirred_get_dev,
500};
501
502static __net_init int mirred_init_net(struct net *net)
503{
504	struct tc_action_net *tn = net_generic(net, act_mirred_ops.net_id);
505
506	return tc_action_net_init(net, tn, &act_mirred_ops);
507}
508
509static void __net_exit mirred_exit_net(struct list_head *net_list)
510{
511	tc_action_net_exit(net_list, act_mirred_ops.net_id);
512}
513
514static struct pernet_operations mirred_net_ops = {
515	.init = mirred_init_net,
516	.exit_batch = mirred_exit_net,
517	.id   = &act_mirred_ops.net_id,
518	.size = sizeof(struct tc_action_net),
519};
520
521MODULE_AUTHOR("Jamal Hadi Salim(2002)");
522MODULE_DESCRIPTION("Device Mirror/redirect actions");
523MODULE_LICENSE("GPL");
524
525static int __init mirred_init_module(void)
526{
527	int err = register_netdevice_notifier(&mirred_device_notifier);
528	if (err)
529		return err;
530
531	pr_info("Mirror/redirect action on\n");
532	err = tcf_register_action(&act_mirred_ops, &mirred_net_ops);
533	if (err)
534		unregister_netdevice_notifier(&mirred_device_notifier);
535
536	return err;
537}
538
539static void __exit mirred_cleanup_module(void)
540{
541	tcf_unregister_action(&act_mirred_ops, &mirred_net_ops);
542	unregister_netdevice_notifier(&mirred_device_notifier);
543}
544
545module_init(mirred_init_module);
546module_exit(mirred_cleanup_module);