// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/sched/act_mirred.c	packet mirroring and redirect actions
 *
 * Authors:	Jamal Hadi Salim (2002-4)
 *
 * TODO: Add ingress support (and socket redirect support)
 */

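/* Typical userspace usage (device names here are only examples):
 *
 *	tc qdisc add dev eth0 clsact
 *	tc filter add dev eth0 ingress matchall \
 *		action mirred egress mirror dev eth1
 *
 * Using "redirect" in place of "mirror" steals the packet instead of
 * sending a copy.
 */
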
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/gfp.h>
#include <linux/if_arp.h>
#include <net/net_namespace.h>
#include <net/netlink.h>
#include <net/dst.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>
#include <linux/tc_act/tc_mirred.h>
#include <net/tc_act/tc_mirred.h>
#include <net/tc_wrapper.h>

static LIST_HEAD(mirred_list);
static DEFINE_SPINLOCK(mirred_list_lock);

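/* Mirred can nest: a redirected packet may hit another mirred action on
 * the target device. Cap the per-CPU nesting depth so a misconfigured
 * loop (e.g. two devices redirecting to each other) drops the packet
 * instead of recursing until the stack overflows.
 */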
#define MIRRED_NEST_LIMIT	4
static DEFINE_PER_CPU(unsigned int, mirred_nest_level);

static bool tcf_mirred_is_act_redirect(int action)
{
	return action == TCA_EGRESS_REDIR || action == TCA_INGRESS_REDIR;
}

static bool tcf_mirred_act_wants_ingress(int action)
{
	switch (action) {
	case TCA_EGRESS_REDIR:
	case TCA_EGRESS_MIRROR:
		return false;
	case TCA_INGRESS_REDIR:
	case TCA_INGRESS_MIRROR:
		return true;
	default:
		BUG();
	}
}

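/* The caller's own skb may be reused (no clone) only for verdicts that
 * tell the caller it no longer owns the packet.
 */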
static bool tcf_mirred_can_reinsert(int action)
{
	switch (action) {
	case TC_ACT_SHOT:
	case TC_ACT_STOLEN:
	case TC_ACT_QUEUED:
	case TC_ACT_TRAP:
		return true;
	}
	return false;
}

static struct net_device *tcf_mirred_dev_dereference(struct tcf_mirred *m)
{
	return rcu_dereference_protected(m->tcfm_dev,
					 lockdep_is_held(&m->tcf_lock));
}

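/* ->cleanup() hook: unlink from the global mirred list and drop the
 * reference on the target device, if any.
 */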
static void tcf_mirred_release(struct tc_action *a)
{
	struct tcf_mirred *m = to_mirred(a);
	struct net_device *dev;

	spin_lock(&mirred_list_lock);
	list_del(&m->tcfm_list);
	spin_unlock(&mirred_list_lock);

	/* last reference to action, no need to lock */
	dev = rcu_dereference_protected(m->tcfm_dev, 1);
	netdev_put(dev, &m->tcfm_dev_tracker);
}

static const struct nla_policy mirred_policy[TCA_MIRRED_MAX + 1] = {
	[TCA_MIRRED_PARMS]	= { .len = sizeof(struct tc_mirred) },
	[TCA_MIRRED_BLOCKID]	= NLA_POLICY_MIN(NLA_U32, 1),
};

static struct tc_action_ops act_mirred_ops;

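/* Swap in a new target device (possibly NULL) and release the old one.
 * Called with m->tcf_lock held; netdev_put() accepts a NULL device.
 */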
static void tcf_mirred_replace_dev(struct tcf_mirred *m,
				   struct net_device *ndev)
{
	struct net_device *odev;

	odev = rcu_replace_pointer(m->tcfm_dev, ndev,
				   lockdep_is_held(&m->tcf_lock));
	netdev_put(odev, &m->tcfm_dev_tracker);
}

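/* ->init() hook: parse the netlink attributes and create or update an
 * action instance. The target is either a device (ifindex in
 * TCA_MIRRED_PARMS) or a tc block (TCA_MIRRED_BLOCKID), never both.
 */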
static int tcf_mirred_init(struct net *net, struct nlattr *nla,
			   struct nlattr *est, struct tc_action **a,
			   struct tcf_proto *tp,
			   u32 flags, struct netlink_ext_ack *extack)
{
	struct tc_action_net *tn = net_generic(net, act_mirred_ops.net_id);
	bool bind = flags & TCA_ACT_FLAGS_BIND;
	struct nlattr *tb[TCA_MIRRED_MAX + 1];
	struct tcf_chain *goto_ch = NULL;
	bool mac_header_xmit = false;
	struct tc_mirred *parm;
	struct tcf_mirred *m;
	bool exists = false;
	int ret, err;
	u32 index;

	if (!nla) {
		NL_SET_ERR_MSG_MOD(extack, "Mirred requires attributes to be passed");
		return -EINVAL;
	}
	ret = nla_parse_nested_deprecated(tb, TCA_MIRRED_MAX, nla,
					  mirred_policy, extack);
	if (ret < 0)
		return ret;
	if (!tb[TCA_MIRRED_PARMS]) {
		NL_SET_ERR_MSG_MOD(extack, "Missing required mirred parameters");
		return -EINVAL;
	}
	parm = nla_data(tb[TCA_MIRRED_PARMS]);
	index = parm->index;
	err = tcf_idr_check_alloc(tn, &index, a, bind);
	if (err < 0)
		return err;
	exists = err;
	if (exists && bind)
		return ACT_P_BOUND;

	if (tb[TCA_MIRRED_BLOCKID] && parm->ifindex) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Cannot specify Block ID and dev simultaneously");
		if (exists)
			tcf_idr_release(*a, bind);
		else
			tcf_idr_cleanup(tn, index);

		return -EINVAL;
	}

	switch (parm->eaction) {
	case TCA_EGRESS_MIRROR:
	case TCA_EGRESS_REDIR:
	case TCA_INGRESS_REDIR:
	case TCA_INGRESS_MIRROR:
		break;
	default:
		if (exists)
			tcf_idr_release(*a, bind);
		else
			tcf_idr_cleanup(tn, index);
		NL_SET_ERR_MSG_MOD(extack, "Unknown mirred option");
		return -EINVAL;
	}

	if (!exists) {
		if (!parm->ifindex && !tb[TCA_MIRRED_BLOCKID]) {
			tcf_idr_cleanup(tn, index);
			NL_SET_ERR_MSG_MOD(extack,
					   "Must specify device or block");
			return -EINVAL;
		}
		ret = tcf_idr_create_from_flags(tn, index, est, a,
						&act_mirred_ops, bind, flags);
		if (ret) {
			tcf_idr_cleanup(tn, index);
			return ret;
		}
		ret = ACT_P_CREATED;
	} else if (!(flags & TCA_ACT_FLAGS_REPLACE)) {
		tcf_idr_release(*a, bind);
		return -EEXIST;
	}

	m = to_mirred(*a);
	if (ret == ACT_P_CREATED)
		INIT_LIST_HEAD(&m->tcfm_list);

	err = tcf_action_check_ctrlact(parm->action, tp, &goto_ch, extack);
	if (err < 0)
		goto release_idr;

	spin_lock_bh(&m->tcf_lock);

	if (parm->ifindex) {
		struct net_device *ndev;

		ndev = dev_get_by_index(net, parm->ifindex);
		if (!ndev) {
			spin_unlock_bh(&m->tcf_lock);
			err = -ENODEV;
			goto put_chain;
		}
		mac_header_xmit = dev_is_mac_header_xmit(ndev);
		tcf_mirred_replace_dev(m, ndev);
		netdev_tracker_alloc(ndev, &m->tcfm_dev_tracker, GFP_ATOMIC);
		m->tcfm_mac_header_xmit = mac_header_xmit;
		m->tcfm_blockid = 0;
	} else if (tb[TCA_MIRRED_BLOCKID]) {
		tcf_mirred_replace_dev(m, NULL);
		m->tcfm_mac_header_xmit = false;
		m->tcfm_blockid = nla_get_u32(tb[TCA_MIRRED_BLOCKID]);
	}
	goto_ch = tcf_action_set_ctrlact(*a, parm->action, goto_ch);
	m->tcfm_eaction = parm->eaction;
	spin_unlock_bh(&m->tcf_lock);
	if (goto_ch)
		tcf_chain_put_by_act(goto_ch);

	if (ret == ACT_P_CREATED) {
		spin_lock(&mirred_list_lock);
		list_add(&m->tcfm_list, &mirred_list);
		spin_unlock(&mirred_list_lock);
	}

	return ret;
put_chain:
	if (goto_ch)
		tcf_chain_put_by_act(goto_ch);
release_idr:
	tcf_idr_release(*a, bind);
	return err;
}

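/* Hand the skb to the target path: egress goes through the device's
 * qdisc; ingress is processed inline when we are already at ingress,
 * or deferred via netif_rx() when coming from egress.
 */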
static int
tcf_mirred_forward(bool at_ingress, bool want_ingress, struct sk_buff *skb)
{
	int err;

	if (!want_ingress)
		err = tcf_dev_queue_xmit(skb, dev_queue_xmit);
	else if (!at_ingress)
		err = netif_rx(skb);
	else
		err = netif_receive_skb(skb);

	return err;
}

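/* Mirror or redirect one skb to a single target device: clone unless the
 * caller's skb can be consumed, realign skb->data to what the target
 * expects (mac vs. network header), then forward.
 */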
static int tcf_mirred_to_dev(struct sk_buff *skb, struct tcf_mirred *m,
			     struct net_device *dev,
			     const bool m_mac_header_xmit, int m_eaction,
			     int retval)
{
	struct sk_buff *skb_to_send = skb;
	bool want_ingress;
	bool is_redirect;
	bool expects_nh;
	bool at_ingress;
	bool dont_clone;
	int mac_len;
	bool at_nh;
	int err;

	is_redirect = tcf_mirred_is_act_redirect(m_eaction);
	if (unlikely(!(dev->flags & IFF_UP)) || !netif_carrier_ok(dev)) {
		net_notice_ratelimited("tc mirred to Houston: device %s is down\n",
				       dev->name);
		goto err_cant_do;
	}

	/* we could easily avoid the clone only if called by ingress and clsact;
	 * since we can't easily detect the clsact caller, skip clone only for
	 * ingress - that covers the TC S/W datapath.
	 */
	at_ingress = skb_at_tc_ingress(skb);
	dont_clone = at_ingress && is_redirect &&
		     tcf_mirred_can_reinsert(retval);
	if (!dont_clone) {
		skb_to_send = skb_clone(skb, GFP_ATOMIC);
		if (!skb_to_send)
			goto err_cant_do;
	}

	want_ingress = tcf_mirred_act_wants_ingress(m_eaction);

	/* All mirred/redirected skbs should clear previous ct info */
	nf_reset_ct(skb_to_send);
	if (want_ingress && !at_ingress) /* drop dst for egress -> ingress */
		skb_dst_drop(skb_to_send);

	expects_nh = want_ingress || !m_mac_header_xmit;
	at_nh = skb->data == skb_network_header(skb);
	if (at_nh != expects_nh) {
		mac_len = at_ingress ? skb->mac_len :
			  skb_network_offset(skb);
		if (expects_nh) {
			/* target device/action expect data at nh */
			skb_pull_rcsum(skb_to_send, mac_len);
		} else {
			/* target device/action expect data at mac */
			skb_push_rcsum(skb_to_send, mac_len);
		}
	}

	skb_to_send->skb_iif = skb->dev->ifindex;
	skb_to_send->dev = dev;

	if (is_redirect) {
		if (skb == skb_to_send)
			retval = TC_ACT_CONSUMED;

		skb_set_redirected(skb_to_send, skb_to_send->tc_at_ingress);
	}

	err = tcf_mirred_forward(at_ingress, want_ingress, skb_to_send);
	if (err)
		tcf_action_inc_overlimit_qstats(&m->common);

	return retval;

err_cant_do:
	if (is_redirect)
		retval = TC_ACT_SHOT;
	tcf_action_inc_overlimit_qstats(&m->common);
	return retval;
}

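/* Redirect to every port on the block except the packet's source port.
 * Each port but the last receives a mirrored clone; the last one gets
 * the actual redirect, so the skb itself is handed off exactly once.
 */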
static int tcf_blockcast_redir(struct sk_buff *skb, struct tcf_mirred *m,
			       struct tcf_block *block, int m_eaction,
			       const u32 exception_ifindex, int retval)
{
	struct net_device *dev_prev = NULL;
	struct net_device *dev = NULL;
	unsigned long index;
	int mirred_eaction;

	mirred_eaction = tcf_mirred_act_wants_ingress(m_eaction) ?
		TCA_INGRESS_MIRROR : TCA_EGRESS_MIRROR;

	xa_for_each(&block->ports, index, dev) {
		if (index == exception_ifindex)
			continue;

		/* send to the port seen on the previous iteration; the
		 * last port found gets the actual redirect below
		 */
		if (!dev_prev)
			goto assign_prev;

		tcf_mirred_to_dev(skb, m, dev_prev,
				  dev_is_mac_header_xmit(dev_prev),
				  mirred_eaction, retval);
assign_prev:
		dev_prev = dev;
	}

	if (dev_prev)
		return tcf_mirred_to_dev(skb, m, dev_prev,
					 dev_is_mac_header_xmit(dev_prev),
					 m_eaction, retval);

	return retval;
}

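/* Mirror to every port on the block except the packet's source port. */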
static int tcf_blockcast_mirror(struct sk_buff *skb, struct tcf_mirred *m,
				struct tcf_block *block, int m_eaction,
				const u32 exception_ifindex, int retval)
{
	struct net_device *dev = NULL;
	unsigned long index;

	xa_for_each(&block->ports, index, dev) {
		if (index == exception_ifindex)
			continue;

		tcf_mirred_to_dev(skb, m, dev,
				  dev_is_mac_header_xmit(dev),
				  m_eaction, retval);
	}

	return retval;
}

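/* Fan a packet out to all ports of a tc block, excluding the port the
 * packet arrived on.
 */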
static int tcf_blockcast(struct sk_buff *skb, struct tcf_mirred *m,
			 const u32 blockid, struct tcf_result *res,
			 int retval)
{
	const u32 exception_ifindex = skb->dev->ifindex;
	struct tcf_block *block;
	bool is_redirect;
	int m_eaction;

	m_eaction = READ_ONCE(m->tcfm_eaction);
	is_redirect = tcf_mirred_is_act_redirect(m_eaction);

	/* we are already under rcu protection, so can call block lookup
	 * directly.
	 */
	block = tcf_block_lookup(dev_net(skb->dev), blockid);
	if (!block || xa_empty(&block->ports)) {
		tcf_action_inc_overlimit_qstats(&m->common);
		return retval;
	}

	if (is_redirect)
		return tcf_blockcast_redir(skb, m, block, m_eaction,
					   exception_ifindex, retval);

	/* If it's not redirect, it is mirror */
	return tcf_blockcast_mirror(skb, m, block, m_eaction, exception_ifindex,
				    retval);
}

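/* ->act() hook: the per-packet entry point, running in the RCU-BH
 * protected datapath (hence rcu_dereference_bh() below).
 */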
TC_INDIRECT_SCOPE int tcf_mirred_act(struct sk_buff *skb,
				     const struct tc_action *a,
				     struct tcf_result *res)
{
	struct tcf_mirred *m = to_mirred(a);
	int retval = READ_ONCE(m->tcf_action);
	unsigned int nest_level;
	bool m_mac_header_xmit;
	struct net_device *dev;
	int m_eaction;
	u32 blockid;

	nest_level = __this_cpu_inc_return(mirred_nest_level);
	if (unlikely(nest_level > MIRRED_NEST_LIMIT)) {
		net_warn_ratelimited("Packet exceeded mirred recursion limit on dev %s\n",
				     netdev_name(skb->dev));
		retval = TC_ACT_SHOT;
		goto dec_nest_level;
	}

	tcf_lastuse_update(&m->tcf_tm);
	tcf_action_update_bstats(&m->common, skb);

	blockid = READ_ONCE(m->tcfm_blockid);
	if (blockid) {
		retval = tcf_blockcast(skb, m, blockid, res, retval);
		goto dec_nest_level;
	}

	dev = rcu_dereference_bh(m->tcfm_dev);
	if (unlikely(!dev)) {
		pr_notice_once("tc mirred: target device is gone\n");
		tcf_action_inc_overlimit_qstats(&m->common);
		goto dec_nest_level;
	}

	m_mac_header_xmit = READ_ONCE(m->tcfm_mac_header_xmit);
	m_eaction = READ_ONCE(m->tcfm_eaction);

	retval = tcf_mirred_to_dev(skb, m, dev, m_mac_header_xmit, m_eaction,
				   retval);

dec_nest_level:
	__this_cpu_dec(mirred_nest_level);

	return retval;
}

static void tcf_stats_update(struct tc_action *a, u64 bytes, u64 packets,
			     u64 drops, u64 lastuse, bool hw)
{
	struct tcf_mirred *m = to_mirred(a);
	struct tcf_t *tm = &m->tcf_tm;

	tcf_action_update_stats(a, bytes, packets, drops, hw);
	tm->lastuse = max_t(u64, tm->lastuse, lastuse);
}

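/* ->dump() hook: serialize the action's configuration back to netlink. */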
static int tcf_mirred_dump(struct sk_buff *skb, struct tc_action *a, int bind,
			   int ref)
{
	unsigned char *b = skb_tail_pointer(skb);
	struct tcf_mirred *m = to_mirred(a);
	struct tc_mirred opt = {
		.index   = m->tcf_index,
		.refcnt  = refcount_read(&m->tcf_refcnt) - ref,
		.bindcnt = atomic_read(&m->tcf_bindcnt) - bind,
	};
	struct net_device *dev;
	struct tcf_t t;
	u32 blockid;

	spin_lock_bh(&m->tcf_lock);
	opt.action = m->tcf_action;
	opt.eaction = m->tcfm_eaction;
	dev = tcf_mirred_dev_dereference(m);
	if (dev)
		opt.ifindex = dev->ifindex;

	if (nla_put(skb, TCA_MIRRED_PARMS, sizeof(opt), &opt))
		goto nla_put_failure;

	blockid = m->tcfm_blockid;
	if (blockid && nla_put_u32(skb, TCA_MIRRED_BLOCKID, blockid))
		goto nla_put_failure;

	tcf_tm_dump(&t, &m->tcf_tm);
	if (nla_put_64bit(skb, TCA_MIRRED_TM, sizeof(t), &t, TCA_MIRRED_PAD))
		goto nla_put_failure;
	spin_unlock_bh(&m->tcf_lock);

	return skb->len;

nla_put_failure:
	spin_unlock_bh(&m->tcf_lock);
	nlmsg_trim(skb, b);
	return -1;
}

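/* On NETDEV_UNREGISTER, drop our reference so the action does not pin a
 * departing device; the datapath then sees a NULL target and counts the
 * packet against the overlimit statistics.
 */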
static int mirred_device_event(struct notifier_block *unused,
			       unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct tcf_mirred *m;

	ASSERT_RTNL();
	if (event == NETDEV_UNREGISTER) {
		spin_lock(&mirred_list_lock);
		list_for_each_entry(m, &mirred_list, tcfm_list) {
			spin_lock_bh(&m->tcf_lock);
			if (tcf_mirred_dev_dereference(m) == dev) {
				netdev_put(dev, &m->tcfm_dev_tracker);
				/* Note: no RCU grace period necessary, as
				 * the net_device is already RCU protected.
				 */
				RCU_INIT_POINTER(m->tcfm_dev, NULL);
			}
			spin_unlock_bh(&m->tcf_lock);
		}
		spin_unlock(&mirred_list_lock);
	}

	return NOTIFY_DONE;
}

static struct notifier_block mirred_device_notifier = {
	.notifier_call = mirred_device_event,
};

static void tcf_mirred_dev_put(void *priv)
{
	struct net_device *dev = priv;

	dev_put(dev);
}

static struct net_device *
tcf_mirred_get_dev(const struct tc_action *a,
		   tc_action_priv_destructor *destructor)
{
	struct tcf_mirred *m = to_mirred(a);
	struct net_device *dev;

	rcu_read_lock();
	dev = rcu_dereference(m->tcfm_dev);
	if (dev) {
		dev_hold(dev);
		*destructor = tcf_mirred_dev_put;
	}
	rcu_read_unlock();

	return dev;
}

static size_t tcf_mirred_get_fill_size(const struct tc_action *act)
{
	return nla_total_size(sizeof(struct tc_mirred));
}

static void tcf_offload_mirred_get_dev(struct flow_action_entry *entry,
				       const struct tc_action *act)
{
	entry->dev = act->ops->get_dev(act, &entry->destructor);
	if (!entry->dev)
		return;
	entry->destructor_priv = entry->dev;
}

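/* ->offload_act_setup() hook: translate the action into a flow_action
 * entry (when binding a filter to hardware) or just an action id (when
 * validating), for drivers that offload mirred.
 */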
static int tcf_mirred_offload_act_setup(struct tc_action *act, void *entry_data,
					u32 *index_inc, bool bind,
					struct netlink_ext_ack *extack)
{
	if (bind) {
		struct flow_action_entry *entry = entry_data;

		if (is_tcf_mirred_egress_redirect(act)) {
			entry->id = FLOW_ACTION_REDIRECT;
			tcf_offload_mirred_get_dev(entry, act);
		} else if (is_tcf_mirred_egress_mirror(act)) {
			entry->id = FLOW_ACTION_MIRRED;
			tcf_offload_mirred_get_dev(entry, act);
		} else if (is_tcf_mirred_ingress_redirect(act)) {
			entry->id = FLOW_ACTION_REDIRECT_INGRESS;
			tcf_offload_mirred_get_dev(entry, act);
		} else if (is_tcf_mirred_ingress_mirror(act)) {
			entry->id = FLOW_ACTION_MIRRED_INGRESS;
			tcf_offload_mirred_get_dev(entry, act);
		} else {
			NL_SET_ERR_MSG_MOD(extack, "Unsupported mirred offload");
			return -EOPNOTSUPP;
		}
		*index_inc = 1;
	} else {
		struct flow_offload_action *fl_action = entry_data;

		if (is_tcf_mirred_egress_redirect(act))
			fl_action->id = FLOW_ACTION_REDIRECT;
		else if (is_tcf_mirred_egress_mirror(act))
			fl_action->id = FLOW_ACTION_MIRRED;
		else if (is_tcf_mirred_ingress_redirect(act))
			fl_action->id = FLOW_ACTION_REDIRECT_INGRESS;
		else if (is_tcf_mirred_ingress_mirror(act))
			fl_action->id = FLOW_ACTION_MIRRED_INGRESS;
		else
			return -EOPNOTSUPP;
	}

	return 0;
}

static struct tc_action_ops act_mirred_ops = {
	.kind		= "mirred",
	.id		= TCA_ID_MIRRED,
	.owner		= THIS_MODULE,
	.act		= tcf_mirred_act,
	.stats_update	= tcf_stats_update,
	.dump		= tcf_mirred_dump,
	.cleanup	= tcf_mirred_release,
	.init		= tcf_mirred_init,
	.get_fill_size	= tcf_mirred_get_fill_size,
	.offload_act_setup = tcf_mirred_offload_act_setup,
	.size		= sizeof(struct tcf_mirred),
	.get_dev	= tcf_mirred_get_dev,
};

static __net_init int mirred_init_net(struct net *net)
{
	struct tc_action_net *tn = net_generic(net, act_mirred_ops.net_id);

	return tc_action_net_init(net, tn, &act_mirred_ops);
}

static void __net_exit mirred_exit_net(struct list_head *net_list)
{
	tc_action_net_exit(net_list, act_mirred_ops.net_id);
}

static struct pernet_operations mirred_net_ops = {
	.init = mirred_init_net,
	.exit_batch = mirred_exit_net,
	.id   = &act_mirred_ops.net_id,
	.size = sizeof(struct tc_action_net),
};

MODULE_AUTHOR("Jamal Hadi Salim (2002)");
MODULE_DESCRIPTION("Device Mirror/redirect actions");
MODULE_LICENSE("GPL");

static int __init mirred_init_module(void)
{
	int err = register_netdevice_notifier(&mirred_device_notifier);

	if (err)
		return err;

	pr_info("Mirror/redirect action on\n");
	err = tcf_register_action(&act_mirred_ops, &mirred_net_ops);
	if (err)
		unregister_netdevice_notifier(&mirred_device_notifier);

	return err;
}

static void __exit mirred_cleanup_module(void)
{
	tcf_unregister_action(&act_mirred_ops, &mirred_net_ops);
	unregister_netdevice_notifier(&mirred_device_notifier);
}

module_init(mirred_init_module);
module_exit(mirred_cleanup_module);