// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/sched/act_mirred.c	packet mirroring and redirect actions
 *
 * Authors:	Jamal Hadi Salim (2002-4)
 *
 * TODO: Add ingress support (and socket redirect support)
 */
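/*
 * Example usage (the interface names here are hypothetical), mirroring
 * all ingress traffic on eth0 to dummy0:
 *
 *	tc qdisc add dev eth0 clsact
 *	tc filter add dev eth0 ingress matchall \
 *		action mirred egress mirror dev dummy0
 */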

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/gfp.h>
#include <linux/if_arp.h>
#include <net/net_namespace.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>
#include <linux/tc_act/tc_mirred.h>
#include <net/tc_act/tc_mirred.h>

static LIST_HEAD(mirred_list);
static DEFINE_SPINLOCK(mirred_list_lock);

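/* A packet redirected by mirred can hit another mirred instance on the
 * new path; a per-CPU nesting counter bounds that recursion.
 */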
#define MIRRED_RECURSION_LIMIT	4
static DEFINE_PER_CPU(unsigned int, mirred_rec_level);

static bool tcf_mirred_is_act_redirect(int action)
{
	return action == TCA_EGRESS_REDIR || action == TCA_INGRESS_REDIR;
}

static bool tcf_mirred_act_wants_ingress(int action)
{
	switch (action) {
	case TCA_EGRESS_REDIR:
	case TCA_EGRESS_MIRROR:
		return false;
	case TCA_INGRESS_REDIR:
	case TCA_INGRESS_MIRROR:
		return true;
	default:
		BUG();
	}
}

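/* These control verdicts tell the TC core that the skb has been
 * consumed, so a redirect may hand off the original skb instead of
 * working on a clone.
 */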
static bool tcf_mirred_can_reinsert(int action)
{
	switch (action) {
	case TC_ACT_SHOT:
	case TC_ACT_STOLEN:
	case TC_ACT_QUEUED:
	case TC_ACT_TRAP:
		return true;
	}
	return false;
}

static struct net_device *tcf_mirred_dev_dereference(struct tcf_mirred *m)
{
	return rcu_dereference_protected(m->tcfm_dev,
					 lockdep_is_held(&m->tcf_lock));
}

static void tcf_mirred_release(struct tc_action *a)
{
	struct tcf_mirred *m = to_mirred(a);
	struct net_device *dev;

	spin_lock(&mirred_list_lock);
	list_del(&m->tcfm_list);
	spin_unlock(&mirred_list_lock);

	/* last reference to action, no need to lock */
	dev = rcu_dereference_protected(m->tcfm_dev, 1);
	if (dev)
		dev_put(dev);
}

static const struct nla_policy mirred_policy[TCA_MIRRED_MAX + 1] = {
	[TCA_MIRRED_PARMS]	= { .len = sizeof(struct tc_mirred) },
};

static unsigned int mirred_net_id;
static struct tc_action_ops act_mirred_ops;

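/* Create a new mirred action, or update an existing one: parse the
 * netlink attributes, validate the mirred sub-action and bind the
 * target net_device.
 */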
static int tcf_mirred_init(struct net *net, struct nlattr *nla,
			   struct nlattr *est, struct tc_action **a,
			   int ovr, int bind, bool rtnl_held,
			   struct tcf_proto *tp,
			   u32 flags, struct netlink_ext_ack *extack)
{
	struct tc_action_net *tn = net_generic(net, mirred_net_id);
	struct nlattr *tb[TCA_MIRRED_MAX + 1];
	struct tcf_chain *goto_ch = NULL;
	bool mac_header_xmit = false;
	struct tc_mirred *parm;
	struct tcf_mirred *m;
	struct net_device *dev;
	bool exists = false;
	int ret, err;
	u32 index;

	if (!nla) {
		NL_SET_ERR_MSG_MOD(extack, "Mirred requires attributes to be passed");
		return -EINVAL;
	}
	ret = nla_parse_nested_deprecated(tb, TCA_MIRRED_MAX, nla,
					  mirred_policy, extack);
	if (ret < 0)
		return ret;
	if (!tb[TCA_MIRRED_PARMS]) {
		NL_SET_ERR_MSG_MOD(extack, "Missing required mirred parameters");
		return -EINVAL;
	}
	parm = nla_data(tb[TCA_MIRRED_PARMS]);
	index = parm->index;
	err = tcf_idr_check_alloc(tn, &index, a, bind);
	if (err < 0)
		return err;
	exists = err;
	if (exists && bind)
		return 0;

	switch (parm->eaction) {
	case TCA_EGRESS_MIRROR:
	case TCA_EGRESS_REDIR:
	case TCA_INGRESS_REDIR:
	case TCA_INGRESS_MIRROR:
		break;
	default:
		if (exists)
			tcf_idr_release(*a, bind);
		else
			tcf_idr_cleanup(tn, index);
		NL_SET_ERR_MSG_MOD(extack, "Unknown mirred option");
		return -EINVAL;
	}

	if (!exists) {
		if (!parm->ifindex) {
			tcf_idr_cleanup(tn, index);
			NL_SET_ERR_MSG_MOD(extack, "Specified device does not exist");
			return -EINVAL;
		}
		ret = tcf_idr_create_from_flags(tn, index, est, a,
						&act_mirred_ops, bind, flags);
		if (ret) {
			tcf_idr_cleanup(tn, index);
			return ret;
		}
		ret = ACT_P_CREATED;
	} else if (!ovr) {
		tcf_idr_release(*a, bind);
		return -EEXIST;
	}

	m = to_mirred(*a);
	if (ret == ACT_P_CREATED)
		INIT_LIST_HEAD(&m->tcfm_list);

	err = tcf_action_check_ctrlact(parm->action, tp, &goto_ch, extack);
	if (err < 0)
		goto release_idr;

	spin_lock_bh(&m->tcf_lock);

	if (parm->ifindex) {
		dev = dev_get_by_index(net, parm->ifindex);
		if (!dev) {
			spin_unlock_bh(&m->tcf_lock);
			err = -ENODEV;
			goto put_chain;
		}
		mac_header_xmit = dev_is_mac_header_xmit(dev);
		dev = rcu_replace_pointer(m->tcfm_dev, dev,
					  lockdep_is_held(&m->tcf_lock));
		if (dev)
			dev_put(dev);
		m->tcfm_mac_header_xmit = mac_header_xmit;
	}
	goto_ch = tcf_action_set_ctrlact(*a, parm->action, goto_ch);
	m->tcfm_eaction = parm->eaction;
	spin_unlock_bh(&m->tcf_lock);
	if (goto_ch)
		tcf_chain_put_by_act(goto_ch);

	if (ret == ACT_P_CREATED) {
		spin_lock(&mirred_list_lock);
		list_add(&m->tcfm_list, &mirred_list);
		spin_unlock(&mirred_list_lock);
	}

	return ret;
put_chain:
	if (goto_ch)
		tcf_chain_put_by_act(goto_ch);
release_idr:
	tcf_idr_release(*a, bind);
	return err;
}

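/* Datapath entry point: clone (or steal) the skb, retarget it at
 * tcfm_dev, and either transmit it or feed it back to ingress.
 */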
static int tcf_mirred_act(struct sk_buff *skb, const struct tc_action *a,
			  struct tcf_result *res)
{
	struct tcf_mirred *m = to_mirred(a);
	struct sk_buff *skb2 = skb;
	bool m_mac_header_xmit;
	struct net_device *dev;
	unsigned int rec_level;
	int retval, err = 0;
	bool use_reinsert;
	bool want_ingress;
	bool is_redirect;
	bool expects_nh;
	int m_eaction;
	int mac_len;
	bool at_nh;

	rec_level = __this_cpu_inc_return(mirred_rec_level);
	if (unlikely(rec_level > MIRRED_RECURSION_LIMIT)) {
		net_warn_ratelimited("Packet exceeded mirred recursion limit on dev %s\n",
				     netdev_name(skb->dev));
		__this_cpu_dec(mirred_rec_level);
		return TC_ACT_SHOT;
	}

	tcf_lastuse_update(&m->tcf_tm);
	tcf_action_update_bstats(&m->common, skb);

	m_mac_header_xmit = READ_ONCE(m->tcfm_mac_header_xmit);
	m_eaction = READ_ONCE(m->tcfm_eaction);
	retval = READ_ONCE(m->tcf_action);
	dev = rcu_dereference_bh(m->tcfm_dev);
	if (unlikely(!dev)) {
		pr_notice_once("tc mirred: target device is gone\n");
		goto out;
	}

	if (unlikely(!(dev->flags & IFF_UP))) {
		net_notice_ratelimited("tc mirred to Houston: device %s is down\n",
				       dev->name);
		goto out;
	}

	/* we could easily avoid the clone only if called by ingress and clsact;
	 * since we can't easily detect the clsact caller, skip clone only for
	 * ingress - that covers the TC S/W datapath.
	 */
	is_redirect = tcf_mirred_is_act_redirect(m_eaction);
	use_reinsert = skb_at_tc_ingress(skb) && is_redirect &&
		       tcf_mirred_can_reinsert(retval);
	if (!use_reinsert) {
		skb2 = skb_clone(skb, GFP_ATOMIC);
		if (!skb2)
			goto out;
	}

	want_ingress = tcf_mirred_act_wants_ingress(m_eaction);

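	/* skb->data may currently sit at the MAC header or at the network
	 * header; line it up with what the target expects before handing
	 * the packet over.
	 */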
	expects_nh = want_ingress || !m_mac_header_xmit;
	at_nh = skb->data == skb_network_header(skb);
	if (at_nh != expects_nh) {
		mac_len = skb_at_tc_ingress(skb) ? skb->mac_len :
			  skb_network_header(skb) - skb_mac_header(skb);
		if (expects_nh) {
			/* target device/action expect data at nh */
			skb_pull_rcsum(skb2, mac_len);
		} else {
			/* target device/action expect data at mac */
			skb_push_rcsum(skb2, mac_len);
		}
	}

	skb2->skb_iif = skb->dev->ifindex;
	skb2->dev = dev;

	/* mirror is always swallowed */
	if (is_redirect) {
		skb_set_redirected(skb2, skb2->tc_at_ingress);

		/* let the caller reinsert the packet, if possible */
		if (use_reinsert) {
			res->ingress = want_ingress;
			if (skb_tc_reinsert(skb, res))
				tcf_action_inc_overlimit_qstats(&m->common);
			__this_cpu_dec(mirred_rec_level);
			return TC_ACT_CONSUMED;
		}
	}

	if (!want_ingress)
		err = dev_queue_xmit(skb2);
	else
		err = netif_receive_skb(skb2);

	if (err) {
out:
		tcf_action_inc_overlimit_qstats(&m->common);
		if (tcf_mirred_is_act_redirect(m_eaction))
			retval = TC_ACT_SHOT;
	}
	__this_cpu_dec(mirred_rec_level);

	return retval;
}

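/* Fold in counters reported from outside the software datapath (e.g. by
 * hardware offload) and keep lastuse monotonic.
 */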
static void tcf_stats_update(struct tc_action *a, u64 bytes, u64 packets,
			     u64 drops, u64 lastuse, bool hw)
{
	struct tcf_mirred *m = to_mirred(a);
	struct tcf_t *tm = &m->tcf_tm;

	tcf_action_update_stats(a, bytes, packets, drops, hw);
	tm->lastuse = max_t(u64, tm->lastuse, lastuse);
}

static int tcf_mirred_dump(struct sk_buff *skb, struct tc_action *a, int bind,
			   int ref)
{
	unsigned char *b = skb_tail_pointer(skb);
	struct tcf_mirred *m = to_mirred(a);
	struct tc_mirred opt = {
		.index   = m->tcf_index,
		.refcnt  = refcount_read(&m->tcf_refcnt) - ref,
		.bindcnt = atomic_read(&m->tcf_bindcnt) - bind,
	};
	struct net_device *dev;
	struct tcf_t t;

	spin_lock_bh(&m->tcf_lock);
	opt.action = m->tcf_action;
	opt.eaction = m->tcfm_eaction;
	dev = tcf_mirred_dev_dereference(m);
	if (dev)
		opt.ifindex = dev->ifindex;

	if (nla_put(skb, TCA_MIRRED_PARMS, sizeof(opt), &opt))
		goto nla_put_failure;

	tcf_tm_dump(&t, &m->tcf_tm);
	if (nla_put_64bit(skb, TCA_MIRRED_TM, sizeof(t), &t, TCA_MIRRED_PAD))
		goto nla_put_failure;
	spin_unlock_bh(&m->tcf_lock);

	return skb->len;

nla_put_failure:
	spin_unlock_bh(&m->tcf_lock);
	nlmsg_trim(skb, b);
	return -1;
}

static int tcf_mirred_walker(struct net *net, struct sk_buff *skb,
			     struct netlink_callback *cb, int type,
			     const struct tc_action_ops *ops,
			     struct netlink_ext_ack *extack)
{
	struct tc_action_net *tn = net_generic(net, mirred_net_id);

	return tcf_generic_walker(tn, skb, cb, type, ops, extack);
}

static int tcf_mirred_search(struct net *net, struct tc_action **a, u32 index)
{
	struct tc_action_net *tn = net_generic(net, mirred_net_id);

	return tcf_idr_search(tn, a, index);
}

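/* On NETDEV_UNREGISTER, drop the reference to the departing device and
 * clear tcfm_dev so the datapath sees the target as gone.
 */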
static int mirred_device_event(struct notifier_block *unused,
			       unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct tcf_mirred *m;

	ASSERT_RTNL();
	if (event == NETDEV_UNREGISTER) {
		spin_lock(&mirred_list_lock);
		list_for_each_entry(m, &mirred_list, tcfm_list) {
			spin_lock_bh(&m->tcf_lock);
			if (tcf_mirred_dev_dereference(m) == dev) {
				dev_put(dev);
				/* Note: no RCU grace period is necessary, as
				 * net_device is already RCU protected.
				 */
				RCU_INIT_POINTER(m->tcfm_dev, NULL);
			}
			spin_unlock_bh(&m->tcf_lock);
		}
		spin_unlock(&mirred_list_lock);
	}

	return NOTIFY_DONE;
}

static struct notifier_block mirred_device_notifier = {
	.notifier_call = mirred_device_event,
};

static void tcf_mirred_dev_put(void *priv)
{
	struct net_device *dev = priv;

	dev_put(dev);
}

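/* Hand the target device to an offload caller, together with a
 * destructor that releases the reference taken here.
 */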
static struct net_device *
tcf_mirred_get_dev(const struct tc_action *a,
		   tc_action_priv_destructor *destructor)
{
	struct tcf_mirred *m = to_mirred(a);
	struct net_device *dev;

	rcu_read_lock();
	dev = rcu_dereference(m->tcfm_dev);
	if (dev) {
		dev_hold(dev);
		*destructor = tcf_mirred_dev_put;
	}
	rcu_read_unlock();

	return dev;
}

static size_t tcf_mirred_get_fill_size(const struct tc_action *act)
{
	return nla_total_size(sizeof(struct tc_mirred));
}

static struct tc_action_ops act_mirred_ops = {
	.kind		=	"mirred",
	.id		=	TCA_ID_MIRRED,
	.owner		=	THIS_MODULE,
	.act		=	tcf_mirred_act,
	.stats_update	=	tcf_stats_update,
	.dump		=	tcf_mirred_dump,
	.cleanup	=	tcf_mirred_release,
	.init		=	tcf_mirred_init,
	.walk		=	tcf_mirred_walker,
	.lookup		=	tcf_mirred_search,
	.get_fill_size	=	tcf_mirred_get_fill_size,
	.size		=	sizeof(struct tcf_mirred),
	.get_dev	=	tcf_mirred_get_dev,
};

static __net_init int mirred_init_net(struct net *net)
{
	struct tc_action_net *tn = net_generic(net, mirred_net_id);

	return tc_action_net_init(net, tn, &act_mirred_ops);
}

static void __net_exit mirred_exit_net(struct list_head *net_list)
{
	tc_action_net_exit(net_list, mirred_net_id);
}

static struct pernet_operations mirred_net_ops = {
	.init = mirred_init_net,
	.exit_batch = mirred_exit_net,
	.id   = &mirred_net_id,
	.size = sizeof(struct tc_action_net),
};

MODULE_AUTHOR("Jamal Hadi Salim(2002)");
MODULE_DESCRIPTION("Device Mirror/redirect actions");
MODULE_LICENSE("GPL");

static int __init mirred_init_module(void)
{
	int err = register_netdevice_notifier(&mirred_device_notifier);

	if (err)
		return err;

	pr_info("Mirror/redirect action on\n");
	err = tcf_register_action(&act_mirred_ops, &mirred_net_ops);
	if (err)
		unregister_netdevice_notifier(&mirred_device_notifier);

	return err;
}

static void __exit mirred_cleanup_module(void)
{
	tcf_unregister_action(&act_mirred_ops, &mirred_net_ops);
	unregister_netdevice_notifier(&mirred_device_notifier);
}

module_init(mirred_init_module);
module_exit(mirred_cleanup_module);