// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/sched/cls_api.c	Packet classifier API.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 * Changes:
 *
 * Eduardo J. Blanco <ejbs@netlabs.com.uy> :990222: kmod support
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/kmod.h>
#include <linux/slab.h>
#include <linux/idr.h>
#include <linux/jhash.h>
#include <linux/rculist.h>
#include <linux/rhashtable.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>
#include <net/tc_act/tc_pedit.h>
#include <net/tc_act/tc_mirred.h>
#include <net/tc_act/tc_vlan.h>
#include <net/tc_act/tc_tunnel_key.h>
#include <net/tc_act/tc_csum.h>
#include <net/tc_act/tc_gact.h>
#include <net/tc_act/tc_police.h>
#include <net/tc_act/tc_sample.h>
#include <net/tc_act/tc_skbedit.h>
#include <net/tc_act/tc_ct.h>
#include <net/tc_act/tc_mpls.h>
#include <net/tc_act/tc_gate.h>
#include <net/flow_offload.h>
#include <net/tc_wrapper.h>

/* The list of all installed classifier types */
static LIST_HEAD(tcf_proto_base);

/* Protects the list of registered TC modules. It is a pure SMP lock. */
static DEFINE_RWLOCK(cls_mod_lock);

static struct xarray tcf_exts_miss_cookies_xa;
struct tcf_exts_miss_cookie_node {
	const struct tcf_chain *chain;
	const struct tcf_proto *tp;
	const struct tcf_exts *exts;
	u32 chain_index;
	u32 tp_prio;
	u32 handle;
	u32 miss_cookie_base;
	struct rcu_head rcu;
};

/* Each tc action entry cookie is composed of a 32-bit miss_cookie_base
 * plus the action's index in the exts tc actions array.
 */
union tcf_exts_miss_cookie {
	struct {
		u32 miss_cookie_base;
		u32 act_index;
	};
	u64 miss_cookie;
};
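
/* Illustrative example (not part of the API): on a little-endian
 * machine, miss_cookie_base = 5 and act_index = 2 pack into the u64
 * cookie 0x0000000200000005, and tcf_exts_miss_cookie_lookup() splits
 * that value back into the same two fields. The byte layout is
 * endianness-dependent, which is harmless because a cookie is only
 * ever decoded by the kernel that encoded it.
 */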

#if IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
static int
tcf_exts_miss_cookie_base_alloc(struct tcf_exts *exts, struct tcf_proto *tp,
				u32 handle)
{
	struct tcf_exts_miss_cookie_node *n;
	static u32 next;
	int err;

	if (WARN_ON(!handle || !tp->ops->get_exts))
		return -EINVAL;

	n = kzalloc(sizeof(*n), GFP_KERNEL);
	if (!n)
		return -ENOMEM;

	n->chain_index = tp->chain->index;
	n->chain = tp->chain;
	n->tp_prio = tp->prio;
	n->tp = tp;
	n->exts = exts;
	n->handle = handle;

	err = xa_alloc_cyclic(&tcf_exts_miss_cookies_xa, &n->miss_cookie_base,
			      n, xa_limit_32b, &next, GFP_KERNEL);
	if (err)
		goto err_xa_alloc;

	exts->miss_cookie_node = n;
	return 0;

err_xa_alloc:
	kfree(n);
	return err;
}

static void tcf_exts_miss_cookie_base_destroy(struct tcf_exts *exts)
{
	struct tcf_exts_miss_cookie_node *n;

	if (!exts->miss_cookie_node)
		return;

	n = exts->miss_cookie_node;
	xa_erase(&tcf_exts_miss_cookies_xa, n->miss_cookie_base);
	kfree_rcu(n, rcu);
}

static struct tcf_exts_miss_cookie_node *
tcf_exts_miss_cookie_lookup(u64 miss_cookie, int *act_index)
{
	union tcf_exts_miss_cookie mc = { .miss_cookie = miss_cookie, };

	*act_index = mc.act_index;
	return xa_load(&tcf_exts_miss_cookies_xa, mc.miss_cookie_base);
}
#else /* IS_ENABLED(CONFIG_NET_TC_SKB_EXT) */
static int
tcf_exts_miss_cookie_base_alloc(struct tcf_exts *exts, struct tcf_proto *tp,
				u32 handle)
{
	return 0;
}

static void tcf_exts_miss_cookie_base_destroy(struct tcf_exts *exts)
{
}
#endif /* IS_ENABLED(CONFIG_NET_TC_SKB_EXT) */

static u64 tcf_exts_miss_cookie_get(u32 miss_cookie_base, int act_index)
{
	union tcf_exts_miss_cookie mc = { .act_index = act_index, };

	if (!miss_cookie_base)
		return 0;

	mc.miss_cookie_base = miss_cookie_base;
	return mc.miss_cookie;
}

#ifdef CONFIG_NET_CLS_ACT
DEFINE_STATIC_KEY_FALSE(tc_skb_ext_tc);
EXPORT_SYMBOL(tc_skb_ext_tc);

void tc_skb_ext_tc_enable(void)
{
	static_branch_inc(&tc_skb_ext_tc);
}
EXPORT_SYMBOL(tc_skb_ext_tc_enable);

void tc_skb_ext_tc_disable(void)
{
	static_branch_dec(&tc_skb_ext_tc);
}
EXPORT_SYMBOL(tc_skb_ext_tc_disable);
#endif

static u32 destroy_obj_hashfn(const struct tcf_proto *tp)
{
	return jhash_3words(tp->chain->index, tp->prio,
			    (__force __u32)tp->protocol, 0);
}
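
/* A sketch of the destroy-tracking scheme (derived from the code below):
 * a tcf_proto is identified by the (chain index, prio, protocol) triple.
 * tcf_proto_signal_destroying() publishes that triple in the block's
 * proto_destroy_ht for the duration of the destruction window, so a
 * concurrent insert of an identical triple can spot it via
 * tcf_proto_exists_destroying() and back off with -EAGAIN instead of
 * racing with the teardown.
 */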

static void tcf_proto_signal_destroying(struct tcf_chain *chain,
					struct tcf_proto *tp)
{
	struct tcf_block *block = chain->block;

	mutex_lock(&block->proto_destroy_lock);
	hash_add_rcu(block->proto_destroy_ht, &tp->destroy_ht_node,
		     destroy_obj_hashfn(tp));
	mutex_unlock(&block->proto_destroy_lock);
}

static bool tcf_proto_cmp(const struct tcf_proto *tp1,
			  const struct tcf_proto *tp2)
{
	return tp1->chain->index == tp2->chain->index &&
	       tp1->prio == tp2->prio &&
	       tp1->protocol == tp2->protocol;
}

static bool tcf_proto_exists_destroying(struct tcf_chain *chain,
					struct tcf_proto *tp)
{
	u32 hash = destroy_obj_hashfn(tp);
	struct tcf_proto *iter;
	bool found = false;

	rcu_read_lock();
	hash_for_each_possible_rcu(chain->block->proto_destroy_ht, iter,
				   destroy_ht_node, hash) {
		if (tcf_proto_cmp(tp, iter)) {
			found = true;
			break;
		}
	}
	rcu_read_unlock();

	return found;
}

static void
tcf_proto_signal_destroyed(struct tcf_chain *chain, struct tcf_proto *tp)
{
	struct tcf_block *block = chain->block;

	mutex_lock(&block->proto_destroy_lock);
	if (hash_hashed(&tp->destroy_ht_node))
		hash_del_rcu(&tp->destroy_ht_node);
	mutex_unlock(&block->proto_destroy_lock);
}

/* Find classifier type by string name */

static const struct tcf_proto_ops *__tcf_proto_lookup_ops(const char *kind)
{
	const struct tcf_proto_ops *t, *res = NULL;

	if (kind) {
		read_lock(&cls_mod_lock);
		list_for_each_entry(t, &tcf_proto_base, head) {
			if (strcmp(kind, t->kind) == 0) {
				if (try_module_get(t->owner))
					res = t;
				break;
			}
		}
		read_unlock(&cls_mod_lock);
	}
	return res;
}

static const struct tcf_proto_ops *
tcf_proto_lookup_ops(const char *kind, bool rtnl_held,
		     struct netlink_ext_ack *extack)
{
	const struct tcf_proto_ops *ops;

	ops = __tcf_proto_lookup_ops(kind);
	if (ops)
		return ops;
#ifdef CONFIG_MODULES
	if (rtnl_held)
		rtnl_unlock();
	request_module("cls_%s", kind);
	if (rtnl_held)
		rtnl_lock();
	ops = __tcf_proto_lookup_ops(kind);
	/* We dropped the RTNL semaphore in order to perform
	 * the module load. So, even if we succeeded in loading
	 * the module we have to replay the request. We indicate
	 * this using -EAGAIN.
	 */
	if (ops) {
		module_put(ops->owner);
		return ERR_PTR(-EAGAIN);
	}
#endif
	NL_SET_ERR_MSG(extack, "TC classifier not found");
	return ERR_PTR(-ENOENT);
}

/* Register (unregister) a new classifier type */

int register_tcf_proto_ops(struct tcf_proto_ops *ops)
{
	struct tcf_proto_ops *t;
	int rc = -EEXIST;

	write_lock(&cls_mod_lock);
	list_for_each_entry(t, &tcf_proto_base, head)
		if (!strcmp(ops->kind, t->kind))
			goto out;

	list_add_tail(&ops->head, &tcf_proto_base);
	rc = 0;
out:
	write_unlock(&cls_mod_lock);
	return rc;
}
EXPORT_SYMBOL(register_tcf_proto_ops);

static struct workqueue_struct *tc_filter_wq;

void unregister_tcf_proto_ops(struct tcf_proto_ops *ops)
{
	struct tcf_proto_ops *t;
	int rc = -ENOENT;

	/* Wait for outstanding call_rcu()s, if any, from a
	 * tcf_proto_ops's destroy() handler.
	 */
	rcu_barrier();
	flush_workqueue(tc_filter_wq);

	write_lock(&cls_mod_lock);
	list_for_each_entry(t, &tcf_proto_base, head) {
		if (t == ops) {
			list_del(&t->head);
			rc = 0;
			break;
		}
	}
	write_unlock(&cls_mod_lock);

	WARN(rc, "unregister tc filter kind(%s) failed %d\n", ops->kind, rc);
}
EXPORT_SYMBOL(unregister_tcf_proto_ops);

bool tcf_queue_work(struct rcu_work *rwork, work_func_t func)
{
	INIT_RCU_WORK(rwork, func);
	return queue_rcu_work(tc_filter_wq, rwork);
}
EXPORT_SYMBOL(tcf_queue_work);

/* Select a new prio value from the kernel-managed range. */

static inline u32 tcf_auto_prio(struct tcf_proto *tp)
{
	u32 first = TC_H_MAKE(0xC0000000U, 0U);

	if (tp)
		first = tp->prio - 1;

	return TC_H_MAJ(first);
}
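
/* Worked example (illustrative): with no filters installed the first
 * auto-allocated prio is TC_H_MAJ(0xC0000000), i.e. 0xC000 (49152) in
 * the 16-bit priority units tc reports. If the current head has prio
 * 0xC0000000, the next auto prio becomes TC_H_MAJ(0xC0000000 - 1) ==
 * 0xBFFF0000, so kernel-chosen priorities count downward from 49152.
 */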

static bool tcf_proto_check_kind(struct nlattr *kind, char *name)
{
	if (kind)
		return nla_strscpy(name, kind, IFNAMSIZ) < 0;
	memset(name, 0, IFNAMSIZ);
	return false;
}

static bool tcf_proto_is_unlocked(const char *kind)
{
	const struct tcf_proto_ops *ops;
	bool ret;

	if (strlen(kind) == 0)
		return false;

	ops = tcf_proto_lookup_ops(kind, false, NULL);
	/* On error return false to take rtnl lock. Proto lookup/create
	 * functions will perform lookup again and properly handle errors.
	 */
	if (IS_ERR(ops))
		return false;

	ret = !!(ops->flags & TCF_PROTO_OPS_DOIT_UNLOCKED);
	module_put(ops->owner);
	return ret;
}

static struct tcf_proto *tcf_proto_create(const char *kind, u32 protocol,
					  u32 prio, struct tcf_chain *chain,
					  bool rtnl_held,
					  struct netlink_ext_ack *extack)
{
	struct tcf_proto *tp;
	int err;

	tp = kzalloc(sizeof(*tp), GFP_KERNEL);
	if (!tp)
		return ERR_PTR(-ENOBUFS);

	tp->ops = tcf_proto_lookup_ops(kind, rtnl_held, extack);
	if (IS_ERR(tp->ops)) {
		err = PTR_ERR(tp->ops);
		goto errout;
	}
	tp->classify = tp->ops->classify;
	tp->protocol = protocol;
	tp->prio = prio;
	tp->chain = chain;
	spin_lock_init(&tp->lock);
	refcount_set(&tp->refcnt, 1);

	err = tp->ops->init(tp);
	if (err) {
		module_put(tp->ops->owner);
		goto errout;
	}
	return tp;

errout:
	kfree(tp);
	return ERR_PTR(err);
}

static void tcf_proto_get(struct tcf_proto *tp)
{
	refcount_inc(&tp->refcnt);
}

static void tcf_chain_put(struct tcf_chain *chain);

static void tcf_proto_destroy(struct tcf_proto *tp, bool rtnl_held,
			      bool sig_destroy, struct netlink_ext_ack *extack)
{
	tp->ops->destroy(tp, rtnl_held, extack);
	if (sig_destroy)
		tcf_proto_signal_destroyed(tp->chain, tp);
	tcf_chain_put(tp->chain);
	module_put(tp->ops->owner);
	kfree_rcu(tp, rcu);
}

static void tcf_proto_put(struct tcf_proto *tp, bool rtnl_held,
			  struct netlink_ext_ack *extack)
{
	if (refcount_dec_and_test(&tp->refcnt))
		tcf_proto_destroy(tp, rtnl_held, true, extack);
}

static bool tcf_proto_check_delete(struct tcf_proto *tp)
{
	if (tp->ops->delete_empty)
		return tp->ops->delete_empty(tp);

	tp->deleting = true;
	return tp->deleting;
}

static void tcf_proto_mark_delete(struct tcf_proto *tp)
{
	spin_lock(&tp->lock);
	tp->deleting = true;
	spin_unlock(&tp->lock);
}

static bool tcf_proto_is_deleting(struct tcf_proto *tp)
{
	bool deleting;

	spin_lock(&tp->lock);
	deleting = tp->deleting;
	spin_unlock(&tp->lock);

	return deleting;
}

#define ASSERT_BLOCK_LOCKED(block) \
	lockdep_assert_held(&(block)->lock)

struct tcf_filter_chain_list_item {
	struct list_head list;
	tcf_chain_head_change_t *chain_head_change;
	void *chain_head_change_priv;
};

static struct tcf_chain *tcf_chain_create(struct tcf_block *block,
					  u32 chain_index)
{
	struct tcf_chain *chain;

	ASSERT_BLOCK_LOCKED(block);

	chain = kzalloc(sizeof(*chain), GFP_KERNEL);
	if (!chain)
		return NULL;
	list_add_tail_rcu(&chain->list, &block->chain_list);
	mutex_init(&chain->filter_chain_lock);
	chain->block = block;
	chain->index = chain_index;
	chain->refcnt = 1;
	if (!chain->index)
		block->chain0.chain = chain;
	return chain;
}

static void tcf_chain_head_change_item(struct tcf_filter_chain_list_item *item,
				       struct tcf_proto *tp_head)
{
	if (item->chain_head_change)
		item->chain_head_change(tp_head, item->chain_head_change_priv);
}

static void tcf_chain0_head_change(struct tcf_chain *chain,
				   struct tcf_proto *tp_head)
{
	struct tcf_filter_chain_list_item *item;
	struct tcf_block *block = chain->block;

	if (chain->index)
		return;

	mutex_lock(&block->lock);
	list_for_each_entry(item, &block->chain0.filter_chain_list, list)
		tcf_chain_head_change_item(item, tp_head);
	mutex_unlock(&block->lock);
}

/* Returns true if block can be safely freed. */

static bool tcf_chain_detach(struct tcf_chain *chain)
{
	struct tcf_block *block = chain->block;

	ASSERT_BLOCK_LOCKED(block);

	list_del_rcu(&chain->list);
	if (!chain->index)
		block->chain0.chain = NULL;

	if (list_empty(&block->chain_list) &&
	    refcount_read(&block->refcnt) == 0)
		return true;

	return false;
}

static void tcf_block_destroy(struct tcf_block *block)
{
	mutex_destroy(&block->lock);
	mutex_destroy(&block->proto_destroy_lock);
	xa_destroy(&block->ports);
	kfree_rcu(block, rcu);
}

static void tcf_chain_destroy(struct tcf_chain *chain, bool free_block)
{
	struct tcf_block *block = chain->block;

	mutex_destroy(&chain->filter_chain_lock);
	kfree_rcu(chain, rcu);
	if (free_block)
		tcf_block_destroy(block);
}

static void tcf_chain_hold(struct tcf_chain *chain)
{
	ASSERT_BLOCK_LOCKED(chain->block);

	++chain->refcnt;
}

static bool tcf_chain_held_by_acts_only(struct tcf_chain *chain)
{
	ASSERT_BLOCK_LOCKED(chain->block);

	/* In case all the references are action references, this
	 * chain should not be shown to the user.
	 */
	return chain->refcnt == chain->action_refcnt;
}

static struct tcf_chain *tcf_chain_lookup(struct tcf_block *block,
					  u32 chain_index)
{
	struct tcf_chain *chain;

	ASSERT_BLOCK_LOCKED(block);

	list_for_each_entry(chain, &block->chain_list, list) {
		if (chain->index == chain_index)
			return chain;
	}
	return NULL;
}

#if IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
static struct tcf_chain *tcf_chain_lookup_rcu(const struct tcf_block *block,
					      u32 chain_index)
{
	struct tcf_chain *chain;

	list_for_each_entry_rcu(chain, &block->chain_list, list) {
		if (chain->index == chain_index)
			return chain;
	}
	return NULL;
}
#endif

static int tc_chain_notify(struct tcf_chain *chain, struct sk_buff *oskb,
			   u32 seq, u16 flags, int event, bool unicast,
			   struct netlink_ext_ack *extack);

static struct tcf_chain *__tcf_chain_get(struct tcf_block *block,
					 u32 chain_index, bool create,
					 bool by_act)
{
	struct tcf_chain *chain = NULL;
	bool is_first_reference;

	mutex_lock(&block->lock);
	chain = tcf_chain_lookup(block, chain_index);
	if (chain) {
		tcf_chain_hold(chain);
	} else {
		if (!create)
			goto errout;
		chain = tcf_chain_create(block, chain_index);
		if (!chain)
			goto errout;
	}

	if (by_act)
		++chain->action_refcnt;
	is_first_reference = chain->refcnt - chain->action_refcnt == 1;
	mutex_unlock(&block->lock);

	/* Send a notification only in case we got the first
	 * non-action reference. Until then, the chain acts only as
	 * a placeholder for actions pointing to it and the user ought
	 * not to know about them.
	 */
	if (is_first_reference && !by_act)
		tc_chain_notify(chain, NULL, 0, NLM_F_CREATE | NLM_F_EXCL,
				RTM_NEWCHAIN, false, NULL);

	return chain;

errout:
	mutex_unlock(&block->lock);
	return chain;
}

static struct tcf_chain *tcf_chain_get(struct tcf_block *block, u32 chain_index,
				       bool create)
{
	return __tcf_chain_get(block, chain_index, create, false);
}

struct tcf_chain *tcf_chain_get_by_act(struct tcf_block *block, u32 chain_index)
{
	return __tcf_chain_get(block, chain_index, true, true);
}
EXPORT_SYMBOL(tcf_chain_get_by_act);

static void tc_chain_tmplt_del(const struct tcf_proto_ops *tmplt_ops,
			       void *tmplt_priv);
static int tc_chain_notify_delete(const struct tcf_proto_ops *tmplt_ops,
				  void *tmplt_priv, u32 chain_index,
				  struct tcf_block *block, struct sk_buff *oskb,
				  u32 seq, u16 flags);

static void __tcf_chain_put(struct tcf_chain *chain, bool by_act,
			    bool explicitly_created)
{
	struct tcf_block *block = chain->block;
	const struct tcf_proto_ops *tmplt_ops;
	unsigned int refcnt, non_act_refcnt;
	bool free_block = false;
	void *tmplt_priv;

	mutex_lock(&block->lock);
	if (explicitly_created) {
		if (!chain->explicitly_created) {
			mutex_unlock(&block->lock);
			return;
		}
		chain->explicitly_created = false;
	}

	if (by_act)
		chain->action_refcnt--;

	/* tc_chain_notify_delete can't be called while holding block lock.
	 * However, when block is unlocked chain can be changed concurrently, so
	 * save these to temporary variables.
	 */
	refcnt = --chain->refcnt;
	non_act_refcnt = refcnt - chain->action_refcnt;
	tmplt_ops = chain->tmplt_ops;
	tmplt_priv = chain->tmplt_priv;

	if (non_act_refcnt == chain->explicitly_created && !by_act) {
		if (non_act_refcnt == 0)
			tc_chain_notify_delete(tmplt_ops, tmplt_priv,
					       chain->index, block, NULL, 0, 0);
		/* Last reference to chain, no need to lock. */
		chain->flushing = false;
	}

	if (refcnt == 0)
		free_block = tcf_chain_detach(chain);
	mutex_unlock(&block->lock);

	if (refcnt == 0) {
		tc_chain_tmplt_del(tmplt_ops, tmplt_priv);
		tcf_chain_destroy(chain, free_block);
	}
}
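
/* Note on the comparison above (a reading of the code, not new behavior):
 * chain->explicitly_created is a bool, so it promotes to 0 or 1. The
 * "non_act_refcnt == chain->explicitly_created" test therefore fires
 * either when no non-action references remain, or when the only one
 * left is the explicit RTM_NEWCHAIN reference being dropped here.
 */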

static void tcf_chain_put(struct tcf_chain *chain)
{
	__tcf_chain_put(chain, false, false);
}

void tcf_chain_put_by_act(struct tcf_chain *chain)
{
	__tcf_chain_put(chain, true, false);
}
EXPORT_SYMBOL(tcf_chain_put_by_act);

static void tcf_chain_put_explicitly_created(struct tcf_chain *chain)
{
	__tcf_chain_put(chain, false, true);
}

static void tcf_chain_flush(struct tcf_chain *chain, bool rtnl_held)
{
	struct tcf_proto *tp, *tp_next;

	mutex_lock(&chain->filter_chain_lock);
	tp = tcf_chain_dereference(chain->filter_chain, chain);
	while (tp) {
		tp_next = rcu_dereference_protected(tp->next, 1);
		tcf_proto_signal_destroying(chain, tp);
		tp = tp_next;
	}
	tp = tcf_chain_dereference(chain->filter_chain, chain);
	RCU_INIT_POINTER(chain->filter_chain, NULL);
	tcf_chain0_head_change(chain, NULL);
	chain->flushing = true;
	mutex_unlock(&chain->filter_chain_lock);

	while (tp) {
		tp_next = rcu_dereference_protected(tp->next, 1);
		tcf_proto_put(tp, rtnl_held, NULL);
		tp = tp_next;
	}
}
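
/* The flush is deliberately two-pass: the first walk, under
 * filter_chain_lock, marks every tp as destroying and unhooks the
 * chain head; the second walk drops the references (which may call
 * ops->destroy() and sleep) only after the lock has been released.
 */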

static int tcf_block_setup(struct tcf_block *block,
			   struct flow_block_offload *bo);

static void tcf_block_offload_init(struct flow_block_offload *bo,
				   struct net_device *dev, struct Qdisc *sch,
				   enum flow_block_command command,
				   enum flow_block_binder_type binder_type,
				   struct flow_block *flow_block,
				   bool shared, struct netlink_ext_ack *extack)
{
	bo->net = dev_net(dev);
	bo->command = command;
	bo->binder_type = binder_type;
	bo->block = flow_block;
	bo->block_shared = shared;
	bo->extack = extack;
	bo->sch = sch;
	bo->cb_list_head = &flow_block->cb_list;
	INIT_LIST_HEAD(&bo->cb_list);
}

static void tcf_block_unbind(struct tcf_block *block,
			     struct flow_block_offload *bo);

static void tc_block_indr_cleanup(struct flow_block_cb *block_cb)
{
	struct tcf_block *block = block_cb->indr.data;
	struct net_device *dev = block_cb->indr.dev;
	struct Qdisc *sch = block_cb->indr.sch;
	struct netlink_ext_ack extack = {};
	struct flow_block_offload bo = {};

	tcf_block_offload_init(&bo, dev, sch, FLOW_BLOCK_UNBIND,
			       block_cb->indr.binder_type,
			       &block->flow_block, tcf_block_shared(block),
			       &extack);
	rtnl_lock();
	down_write(&block->cb_lock);
	list_del(&block_cb->driver_list);
	list_move(&block_cb->list, &bo.cb_list);
	tcf_block_unbind(block, &bo);
	up_write(&block->cb_lock);
	rtnl_unlock();
}

static bool tcf_block_offload_in_use(struct tcf_block *block)
{
	return atomic_read(&block->offloadcnt);
}

static int tcf_block_offload_cmd(struct tcf_block *block,
				 struct net_device *dev, struct Qdisc *sch,
				 struct tcf_block_ext_info *ei,
				 enum flow_block_command command,
				 struct netlink_ext_ack *extack)
{
	struct flow_block_offload bo = {};

	tcf_block_offload_init(&bo, dev, sch, command, ei->binder_type,
			       &block->flow_block, tcf_block_shared(block),
			       extack);

	if (dev->netdev_ops->ndo_setup_tc) {
		int err;

		err = dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_BLOCK, &bo);
		if (err < 0) {
			if (err != -EOPNOTSUPP)
				NL_SET_ERR_MSG(extack, "Driver ndo_setup_tc failed");
			return err;
		}

		return tcf_block_setup(block, &bo);
	}

	flow_indr_dev_setup_offload(dev, sch, TC_SETUP_BLOCK, block, &bo,
				    tc_block_indr_cleanup);
	tcf_block_setup(block, &bo);

	return -EOPNOTSUPP;
}
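
/* -EOPNOTSUPP from this helper means the device has no direct
 * ndo_setup_tc handler; the indirect (flow_indr) registration above may
 * still attach callbacks later. tcf_block_offload_bind() below treats
 * that return as "count the device in nooffloaddevcnt" rather than as a
 * hard failure, unless offloaded filters already exist on the block.
 */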

static int tcf_block_offload_bind(struct tcf_block *block, struct Qdisc *q,
				  struct tcf_block_ext_info *ei,
				  struct netlink_ext_ack *extack)
{
	struct net_device *dev = q->dev_queue->dev;
	int err;

	down_write(&block->cb_lock);

	/* If the tc offload feature is disabled and the block we try to bind
	 * to already has some offloaded filters, refuse the bind.
	 */
	if (dev->netdev_ops->ndo_setup_tc &&
	    !tc_can_offload(dev) &&
	    tcf_block_offload_in_use(block)) {
		NL_SET_ERR_MSG(extack, "Bind to offloaded block failed as dev has offload disabled");
		err = -EOPNOTSUPP;
		goto err_unlock;
	}

	err = tcf_block_offload_cmd(block, dev, q, ei, FLOW_BLOCK_BIND, extack);
	if (err == -EOPNOTSUPP)
		goto no_offload_dev_inc;
	if (err)
		goto err_unlock;

	up_write(&block->cb_lock);
	return 0;

no_offload_dev_inc:
	if (tcf_block_offload_in_use(block))
		goto err_unlock;

	err = 0;
	block->nooffloaddevcnt++;
err_unlock:
	up_write(&block->cb_lock);
	return err;
}

static void tcf_block_offload_unbind(struct tcf_block *block, struct Qdisc *q,
				     struct tcf_block_ext_info *ei)
{
	struct net_device *dev = q->dev_queue->dev;
	int err;

	down_write(&block->cb_lock);
	err = tcf_block_offload_cmd(block, dev, q, ei, FLOW_BLOCK_UNBIND, NULL);
	if (err == -EOPNOTSUPP)
		goto no_offload_dev_dec;
	up_write(&block->cb_lock);
	return;

no_offload_dev_dec:
	WARN_ON(block->nooffloaddevcnt-- == 0);
	up_write(&block->cb_lock);
}

static int
tcf_chain0_head_change_cb_add(struct tcf_block *block,
			      struct tcf_block_ext_info *ei,
			      struct netlink_ext_ack *extack)
{
	struct tcf_filter_chain_list_item *item;
	struct tcf_chain *chain0;

	item = kmalloc(sizeof(*item), GFP_KERNEL);
	if (!item) {
		NL_SET_ERR_MSG(extack, "Memory allocation for head change callback item failed");
		return -ENOMEM;
	}
	item->chain_head_change = ei->chain_head_change;
	item->chain_head_change_priv = ei->chain_head_change_priv;

	mutex_lock(&block->lock);
	chain0 = block->chain0.chain;
	if (chain0)
		tcf_chain_hold(chain0);
	else
		list_add(&item->list, &block->chain0.filter_chain_list);
	mutex_unlock(&block->lock);

	if (chain0) {
		struct tcf_proto *tp_head;

		mutex_lock(&chain0->filter_chain_lock);

		tp_head = tcf_chain_dereference(chain0->filter_chain, chain0);
		if (tp_head)
			tcf_chain_head_change_item(item, tp_head);

		mutex_lock(&block->lock);
		list_add(&item->list, &block->chain0.filter_chain_list);
		mutex_unlock(&block->lock);

		mutex_unlock(&chain0->filter_chain_lock);
		tcf_chain_put(chain0);
	}

	return 0;
}
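
/* Subscription sketch: if chain 0 already exists, the current head is
 * replayed to the new item while chain0->filter_chain_lock is held, and
 * only then is the item published on the list. Holding the filter chain
 * lock across both steps ensures the subscriber observes either the
 * replayed head or any later change, never neither.
 */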

static void
tcf_chain0_head_change_cb_del(struct tcf_block *block,
			      struct tcf_block_ext_info *ei)
{
	struct tcf_filter_chain_list_item *item;

	mutex_lock(&block->lock);
	list_for_each_entry(item, &block->chain0.filter_chain_list, list) {
		if ((!ei->chain_head_change && !ei->chain_head_change_priv) ||
		    (item->chain_head_change == ei->chain_head_change &&
		     item->chain_head_change_priv == ei->chain_head_change_priv)) {
			if (block->chain0.chain)
				tcf_chain_head_change_item(item, NULL);
			list_del(&item->list);
			mutex_unlock(&block->lock);

			kfree(item);
			return;
		}
	}
	mutex_unlock(&block->lock);
	WARN_ON(1);
}

struct tcf_net {
	spinlock_t idr_lock; /* Protects idr */
	struct idr idr;
};

static unsigned int tcf_net_id;

static int tcf_block_insert(struct tcf_block *block, struct net *net,
			    struct netlink_ext_ack *extack)
{
	struct tcf_net *tn = net_generic(net, tcf_net_id);
	int err;

	idr_preload(GFP_KERNEL);
	spin_lock(&tn->idr_lock);
	err = idr_alloc_u32(&tn->idr, block, &block->index, block->index,
			    GFP_NOWAIT);
	spin_unlock(&tn->idr_lock);
	idr_preload_end();

	return err;
}
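
/* Shared blocks live in a per-netns IDR keyed by the user-supplied
 * block index. Passing block->index as both the requested id and the
 * maximum makes idr_alloc_u32() claim exactly that index, so inserting
 * a duplicate index fails (with -ENOSPC) instead of silently remapping.
 */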

static void tcf_block_remove(struct tcf_block *block, struct net *net)
{
	struct tcf_net *tn = net_generic(net, tcf_net_id);

	spin_lock(&tn->idr_lock);
	idr_remove(&tn->idr, block->index);
	spin_unlock(&tn->idr_lock);
}

static struct tcf_block *tcf_block_create(struct net *net, struct Qdisc *q,
					  u32 block_index,
					  struct netlink_ext_ack *extack)
{
	struct tcf_block *block;

	block = kzalloc(sizeof(*block), GFP_KERNEL);
	if (!block) {
		NL_SET_ERR_MSG(extack, "Memory allocation for block failed");
		return ERR_PTR(-ENOMEM);
	}
	mutex_init(&block->lock);
	mutex_init(&block->proto_destroy_lock);
	init_rwsem(&block->cb_lock);
	flow_block_init(&block->flow_block);
	INIT_LIST_HEAD(&block->chain_list);
	INIT_LIST_HEAD(&block->owner_list);
	INIT_LIST_HEAD(&block->chain0.filter_chain_list);

	refcount_set(&block->refcnt, 1);
	block->net = net;
	block->index = block_index;
	xa_init(&block->ports);

	/* Don't store q pointer for blocks which are shared */
	if (!tcf_block_shared(block))
		block->q = q;
	return block;
}

struct tcf_block *tcf_block_lookup(struct net *net, u32 block_index)
{
	struct tcf_net *tn = net_generic(net, tcf_net_id);

	return idr_find(&tn->idr, block_index);
}
EXPORT_SYMBOL(tcf_block_lookup);

static struct tcf_block *tcf_block_refcnt_get(struct net *net, u32 block_index)
{
	struct tcf_block *block;

	rcu_read_lock();
	block = tcf_block_lookup(net, block_index);
	if (block && !refcount_inc_not_zero(&block->refcnt))
		block = NULL;
	rcu_read_unlock();

	return block;
}

static struct tcf_chain *
__tcf_get_next_chain(struct tcf_block *block, struct tcf_chain *chain)
{
	mutex_lock(&block->lock);
	if (chain)
		chain = list_is_last(&chain->list, &block->chain_list) ?
			NULL : list_next_entry(chain, list);
	else
		chain = list_first_entry_or_null(&block->chain_list,
						 struct tcf_chain, list);

	/* skip all action-only chains */
	while (chain && tcf_chain_held_by_acts_only(chain))
		chain = list_is_last(&chain->list, &block->chain_list) ?
			NULL : list_next_entry(chain, list);

	if (chain)
		tcf_chain_hold(chain);
	mutex_unlock(&block->lock);

	return chain;
}

/* Function to be used by all clients that want to iterate over all chains on
 * a block. It properly obtains block->lock and takes a reference to the chain
 * before returning it. Users of this function must be tolerant to concurrent
 * chain insertion/deletion or ensure that no concurrent chain modification is
 * possible. Note that netlink dump callbacks cannot guarantee a consistent
 * dump because the rtnl lock is released each time the skb is filled with
 * data and sent to user-space.
 */

struct tcf_chain *
tcf_get_next_chain(struct tcf_block *block, struct tcf_chain *chain)
{
	struct tcf_chain *chain_next = __tcf_get_next_chain(block, chain);

	if (chain)
		tcf_chain_put(chain);

	return chain_next;
}
EXPORT_SYMBOL(tcf_get_next_chain);
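
/* Typical iteration pattern (as used by tcf_block_flush_all_chains()
 * below); the helper drops the previous chain's reference itself, so a
 * caller that breaks out early must call tcf_chain_put() on the chain
 * it still holds:
 *
 *	for (chain = tcf_get_next_chain(block, NULL);
 *	     chain;
 *	     chain = tcf_get_next_chain(block, chain))
 *		...;
 */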

static struct tcf_proto *
__tcf_get_next_proto(struct tcf_chain *chain, struct tcf_proto *tp)
{
	u32 prio = 0;

	ASSERT_RTNL();
	mutex_lock(&chain->filter_chain_lock);

	if (!tp) {
		tp = tcf_chain_dereference(chain->filter_chain, chain);
	} else if (tcf_proto_is_deleting(tp)) {
		/* 'deleting' flag is set and chain->filter_chain_lock was
		 * unlocked, which means next pointer could be invalid. Restart
		 * search.
		 */
		prio = tp->prio + 1;
		tp = tcf_chain_dereference(chain->filter_chain, chain);

		for (; tp; tp = tcf_chain_dereference(tp->next, chain))
			if (!tp->deleting && tp->prio >= prio)
				break;
	} else {
		tp = tcf_chain_dereference(tp->next, chain);
	}

	if (tp)
		tcf_proto_get(tp);

	mutex_unlock(&chain->filter_chain_lock);

	return tp;
}

/* Function to be used by all clients that want to iterate over all tp's on
 * a chain. Users of this function must be tolerant to concurrent tp
 * insertion/deletion or ensure that no concurrent chain modification is
 * possible. Note that netlink dump callbacks cannot guarantee a consistent
 * dump because the rtnl lock is released each time the skb is filled with
 * data and sent to user-space.
 */

struct tcf_proto *
tcf_get_next_proto(struct tcf_chain *chain, struct tcf_proto *tp)
{
	struct tcf_proto *tp_next = __tcf_get_next_proto(chain, tp);

	if (tp)
		tcf_proto_put(tp, true, NULL);

	return tp_next;
}
EXPORT_SYMBOL(tcf_get_next_proto);

static void tcf_block_flush_all_chains(struct tcf_block *block, bool rtnl_held)
{
	struct tcf_chain *chain;

	/* Last reference to block. At this point chains cannot be added or
	 * removed concurrently.
	 */
	for (chain = tcf_get_next_chain(block, NULL);
	     chain;
	     chain = tcf_get_next_chain(block, chain)) {
		tcf_chain_put_explicitly_created(chain);
		tcf_chain_flush(chain, rtnl_held);
	}
}

/* Look up the Qdisc and increment its reference counter.
 * Set parent, if necessary.
 */

static int __tcf_qdisc_find(struct net *net, struct Qdisc **q,
			    u32 *parent, int ifindex, bool rtnl_held,
			    struct netlink_ext_ack *extack)
{
	const struct Qdisc_class_ops *cops;
	struct net_device *dev;
	int err = 0;

	if (ifindex == TCM_IFINDEX_MAGIC_BLOCK)
		return 0;

	rcu_read_lock();

	/* Find link */
	dev = dev_get_by_index_rcu(net, ifindex);
	if (!dev) {
		rcu_read_unlock();
		return -ENODEV;
	}

	/* Find qdisc */
	if (!*parent) {
		*q = rcu_dereference(dev->qdisc);
		*parent = (*q)->handle;
	} else {
		*q = qdisc_lookup_rcu(dev, TC_H_MAJ(*parent));
		if (!*q) {
			NL_SET_ERR_MSG(extack, "Parent Qdisc doesn't exist");
			err = -EINVAL;
			goto errout_rcu;
		}
	}

	*q = qdisc_refcount_inc_nz(*q);
	if (!*q) {
		NL_SET_ERR_MSG(extack, "Parent Qdisc doesn't exist");
		err = -EINVAL;
		goto errout_rcu;
	}

	/* Is it classful? */
	cops = (*q)->ops->cl_ops;
	if (!cops) {
		NL_SET_ERR_MSG(extack, "Qdisc not classful");
		err = -EINVAL;
		goto errout_qdisc;
	}

	if (!cops->tcf_block) {
		NL_SET_ERR_MSG(extack, "Class doesn't support blocks");
		err = -EOPNOTSUPP;
		goto errout_qdisc;
	}

errout_rcu:
	/* At this point we know that qdisc is not noop_qdisc,
	 * which means that qdisc holds a reference to net_device
	 * and we hold a reference to qdisc, so it is safe to release
	 * rcu read lock.
	 */
	rcu_read_unlock();
	return err;

errout_qdisc:
	rcu_read_unlock();

	if (rtnl_held)
		qdisc_put(*q);
	else
		qdisc_put_unlocked(*q);
	*q = NULL;

	return err;
}

static int __tcf_qdisc_cl_find(struct Qdisc *q, u32 parent, unsigned long *cl,
			       int ifindex, struct netlink_ext_ack *extack)
{
	if (ifindex == TCM_IFINDEX_MAGIC_BLOCK)
		return 0;

	/* Are we searching for a filter attached to a class? */
	if (TC_H_MIN(parent)) {
		const struct Qdisc_class_ops *cops = q->ops->cl_ops;

		*cl = cops->find(q, parent);
		if (*cl == 0) {
			NL_SET_ERR_MSG(extack, "Specified class doesn't exist");
			return -ENOENT;
		}
	}

	return 0;
}

static struct tcf_block *__tcf_block_find(struct net *net, struct Qdisc *q,
					  unsigned long cl, int ifindex,
					  u32 block_index,
					  struct netlink_ext_ack *extack)
{
	struct tcf_block *block;

	if (ifindex == TCM_IFINDEX_MAGIC_BLOCK) {
		block = tcf_block_refcnt_get(net, block_index);
		if (!block) {
			NL_SET_ERR_MSG(extack, "Block of given index was not found");
			return ERR_PTR(-EINVAL);
		}
	} else {
		const struct Qdisc_class_ops *cops = q->ops->cl_ops;

		block = cops->tcf_block(q, cl, extack);
		if (!block)
			return ERR_PTR(-EINVAL);

		if (tcf_block_shared(block)) {
			NL_SET_ERR_MSG(extack, "This filter block is shared. Please use the block index to manipulate the filters");
			return ERR_PTR(-EOPNOTSUPP);
		}

		/* Always take a reference to the block in order to support
		 * execution of the rules update path of cls API without rtnl
		 * lock. Caller must release the block when it is finished
		 * using it. The 'if' branch of this conditional obtains its
		 * reference by calling tcf_block_refcnt_get().
		 */
		refcount_inc(&block->refcnt);
	}

	return block;
}

static void __tcf_block_put(struct tcf_block *block, struct Qdisc *q,
			    struct tcf_block_ext_info *ei, bool rtnl_held)
{
	if (refcount_dec_and_mutex_lock(&block->refcnt, &block->lock)) {
		/* Flushing/putting all chains will cause the block to be
		 * deallocated when last chain is freed. However, if chain_list
		 * is empty, block has to be manually deallocated. After block
		 * reference counter reached 0, it is no longer possible to
		 * increment it or add new chains to block.
		 */
		bool free_block = list_empty(&block->chain_list);

		mutex_unlock(&block->lock);
		if (tcf_block_shared(block))
			tcf_block_remove(block, block->net);

		if (q)
			tcf_block_offload_unbind(block, q, ei);

		if (free_block)
			tcf_block_destroy(block);
		else
			tcf_block_flush_all_chains(block, rtnl_held);
	} else if (q) {
		tcf_block_offload_unbind(block, q, ei);
	}
}

static void tcf_block_refcnt_put(struct tcf_block *block, bool rtnl_held)
{
	__tcf_block_put(block, NULL, NULL, rtnl_held);
}

/* Find tcf block.
 * Set q, parent, cl when appropriate.
 */

static struct tcf_block *tcf_block_find(struct net *net, struct Qdisc **q,
					u32 *parent, unsigned long *cl,
					int ifindex, u32 block_index,
					struct netlink_ext_ack *extack)
{
	struct tcf_block *block;
	int err = 0;

	ASSERT_RTNL();

	err = __tcf_qdisc_find(net, q, parent, ifindex, true, extack);
	if (err)
		goto errout;

	err = __tcf_qdisc_cl_find(*q, *parent, cl, ifindex, extack);
	if (err)
		goto errout_qdisc;

	block = __tcf_block_find(net, *q, *cl, ifindex, block_index, extack);
	if (IS_ERR(block)) {
		err = PTR_ERR(block);
		goto errout_qdisc;
	}

	return block;

errout_qdisc:
	if (*q)
		qdisc_put(*q);
errout:
	*q = NULL;
	return ERR_PTR(err);
}

static void tcf_block_release(struct Qdisc *q, struct tcf_block *block,
			      bool rtnl_held)
{
	if (!IS_ERR_OR_NULL(block))
		tcf_block_refcnt_put(block, rtnl_held);

	if (q) {
		if (rtnl_held)
			qdisc_put(q);
		else
			qdisc_put_unlocked(q);
	}
}

struct tcf_block_owner_item {
	struct list_head list;
	struct Qdisc *q;
	enum flow_block_binder_type binder_type;
};

static void
tcf_block_owner_netif_keep_dst(struct tcf_block *block,
			       struct Qdisc *q,
			       enum flow_block_binder_type binder_type)
{
	if (block->keep_dst &&
	    binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS &&
	    binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS)
		netif_keep_dst(qdisc_dev(q));
}

void tcf_block_netif_keep_dst(struct tcf_block *block)
{
	struct tcf_block_owner_item *item;

	block->keep_dst = true;
	list_for_each_entry(item, &block->owner_list, list)
		tcf_block_owner_netif_keep_dst(block, item->q,
					       item->binder_type);
}
EXPORT_SYMBOL(tcf_block_netif_keep_dst);

static int tcf_block_owner_add(struct tcf_block *block,
			       struct Qdisc *q,
			       enum flow_block_binder_type binder_type)
{
	struct tcf_block_owner_item *item;

	item = kmalloc(sizeof(*item), GFP_KERNEL);
	if (!item)
		return -ENOMEM;
	item->q = q;
	item->binder_type = binder_type;
	list_add(&item->list, &block->owner_list);
	return 0;
}

static void tcf_block_owner_del(struct tcf_block *block,
				struct Qdisc *q,
				enum flow_block_binder_type binder_type)
{
	struct tcf_block_owner_item *item;

	list_for_each_entry(item, &block->owner_list, list) {
		if (item->q == q && item->binder_type == binder_type) {
			list_del(&item->list);
			kfree(item);
			return;
		}
	}
	WARN_ON(1);
}

static bool tcf_block_tracks_dev(struct tcf_block *block,
				 struct tcf_block_ext_info *ei)
{
	return tcf_block_shared(block) &&
	       (ei->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS ||
		ei->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS);
}

int tcf_block_get_ext(struct tcf_block **p_block, struct Qdisc *q,
		      struct tcf_block_ext_info *ei,
		      struct netlink_ext_ack *extack)
{
	struct net_device *dev = qdisc_dev(q);
	struct net *net = qdisc_net(q);
	struct tcf_block *block = NULL;
	int err;

	if (ei->block_index)
		/* A nonzero block_index means a shared block is requested */
		block = tcf_block_refcnt_get(net, ei->block_index);

	if (!block) {
		block = tcf_block_create(net, q, ei->block_index, extack);
		if (IS_ERR(block))
			return PTR_ERR(block);
		if (tcf_block_shared(block)) {
			err = tcf_block_insert(block, net, extack);
			if (err)
				goto err_block_insert;
		}
	}

	err = tcf_block_owner_add(block, q, ei->binder_type);
	if (err)
		goto err_block_owner_add;

	tcf_block_owner_netif_keep_dst(block, q, ei->binder_type);

	err = tcf_chain0_head_change_cb_add(block, ei, extack);
	if (err)
		goto err_chain0_head_change_cb_add;

	err = tcf_block_offload_bind(block, q, ei, extack);
	if (err)
		goto err_block_offload_bind;

	if (tcf_block_tracks_dev(block, ei)) {
		err = xa_insert(&block->ports, dev->ifindex, dev, GFP_KERNEL);
		if (err) {
			NL_SET_ERR_MSG(extack, "block dev insert failed");
			goto err_dev_insert;
		}
	}

	*p_block = block;
	return 0;

err_dev_insert:
err_block_offload_bind:
	tcf_chain0_head_change_cb_del(block, ei);
err_chain0_head_change_cb_add:
	tcf_block_owner_del(block, q, ei->binder_type);
err_block_owner_add:
err_block_insert:
	tcf_block_refcnt_put(block, true);
	return err;
}
EXPORT_SYMBOL(tcf_block_get_ext);

static void tcf_chain_head_change_dflt(struct tcf_proto *tp_head, void *priv)
{
	struct tcf_proto __rcu **p_filter_chain = priv;

	rcu_assign_pointer(*p_filter_chain, tp_head);
}

int tcf_block_get(struct tcf_block **p_block,
		  struct tcf_proto __rcu **p_filter_chain, struct Qdisc *q,
		  struct netlink_ext_ack *extack)
{
	struct tcf_block_ext_info ei = {
		.chain_head_change = tcf_chain_head_change_dflt,
		.chain_head_change_priv = p_filter_chain,
	};

	WARN_ON(!p_filter_chain);
	return tcf_block_get_ext(p_block, q, &ei, extack);
}
EXPORT_SYMBOL(tcf_block_get);
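
/* Typical caller sketch (hedged: the exact call sites live in the
 * individual qdiscs, e.g. classful schedulers such as sch_prio):
 *
 *	err = tcf_block_get(&q->block, &q->filter_list, sch, extack);
 *	if (err)
 *		return err;
 *
 * The default head-change callback then keeps q->filter_list pointing
 * at the current head of chain 0.
 */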

/* XXX: Standalone actions are not allowed to jump to any chain, and bound
 * actions should all be removed after flushing.
 */
void tcf_block_put_ext(struct tcf_block *block, struct Qdisc *q,
		       struct tcf_block_ext_info *ei)
{
	struct net_device *dev = qdisc_dev(q);

	if (!block)
		return;
	if (tcf_block_tracks_dev(block, ei))
		xa_erase(&block->ports, dev->ifindex);
	tcf_chain0_head_change_cb_del(block, ei);
	tcf_block_owner_del(block, q, ei->binder_type);

	__tcf_block_put(block, q, ei, true);
}
EXPORT_SYMBOL(tcf_block_put_ext);

void tcf_block_put(struct tcf_block *block)
{
	struct tcf_block_ext_info ei = {0, };

	if (!block)
		return;
	tcf_block_put_ext(block, block->q, &ei);
}
EXPORT_SYMBOL(tcf_block_put);

static int
tcf_block_playback_offloads(struct tcf_block *block, flow_setup_cb_t *cb,
			    void *cb_priv, bool add, bool offload_in_use,
			    struct netlink_ext_ack *extack)
{
	struct tcf_chain *chain, *chain_prev;
	struct tcf_proto *tp, *tp_prev;
	int err;

	lockdep_assert_held(&block->cb_lock);

	for (chain = __tcf_get_next_chain(block, NULL);
	     chain;
	     chain_prev = chain,
	     chain = __tcf_get_next_chain(block, chain),
	     tcf_chain_put(chain_prev)) {
		if (chain->tmplt_ops && add)
			chain->tmplt_ops->tmplt_reoffload(chain, true, cb,
							  cb_priv);
		for (tp = __tcf_get_next_proto(chain, NULL); tp;
		     tp_prev = tp,
		     tp = __tcf_get_next_proto(chain, tp),
		     tcf_proto_put(tp_prev, true, NULL)) {
			if (tp->ops->reoffload) {
				err = tp->ops->reoffload(tp, add, cb, cb_priv,
							 extack);
				if (err && add)
					goto err_playback_remove;
			} else if (add && offload_in_use) {
				err = -EOPNOTSUPP;
				NL_SET_ERR_MSG(extack, "Filter HW offload failed - classifier without re-offloading support");
				goto err_playback_remove;
			}
		}
		if (chain->tmplt_ops && !add)
			chain->tmplt_ops->tmplt_reoffload(chain, false, cb,
							  cb_priv);
	}

	return 0;

err_playback_remove:
	tcf_proto_put(tp, true, NULL);
	tcf_chain_put(chain);
	tcf_block_playback_offloads(block, cb, cb_priv, false, offload_in_use,
				    extack);
	return err;
}

static int tcf_block_bind(struct tcf_block *block,
			  struct flow_block_offload *bo)
{
	struct flow_block_cb *block_cb, *next;
	int err, i = 0;

	lockdep_assert_held(&block->cb_lock);

	list_for_each_entry(block_cb, &bo->cb_list, list) {
		err = tcf_block_playback_offloads(block, block_cb->cb,
						  block_cb->cb_priv, true,
						  tcf_block_offload_in_use(block),
						  bo->extack);
		if (err)
			goto err_unroll;
		if (!bo->unlocked_driver_cb)
			block->lockeddevcnt++;

		i++;
	}
	list_splice(&bo->cb_list, &block->flow_block.cb_list);

	return 0;

err_unroll:
	list_for_each_entry_safe(block_cb, next, &bo->cb_list, list) {
		list_del(&block_cb->driver_list);
		if (i-- > 0) {
			list_del(&block_cb->list);
			tcf_block_playback_offloads(block, block_cb->cb,
						    block_cb->cb_priv, false,
						    tcf_block_offload_in_use(block),
						    NULL);
			if (!bo->unlocked_driver_cb)
				block->lockeddevcnt--;
		}
		flow_block_cb_free(block_cb);
	}

	return err;
}

static void tcf_block_unbind(struct tcf_block *block,
			     struct flow_block_offload *bo)
{
	struct flow_block_cb *block_cb, *next;

	lockdep_assert_held(&block->cb_lock);

	list_for_each_entry_safe(block_cb, next, &bo->cb_list, list) {
		tcf_block_playback_offloads(block, block_cb->cb,
					    block_cb->cb_priv, false,
					    tcf_block_offload_in_use(block),
					    NULL);
		list_del(&block_cb->list);
		flow_block_cb_free(block_cb);
		if (!bo->unlocked_driver_cb)
			block->lockeddevcnt--;
	}
}

static int tcf_block_setup(struct tcf_block *block,
			   struct flow_block_offload *bo)
{
	int err;

	switch (bo->command) {
	case FLOW_BLOCK_BIND:
		err = tcf_block_bind(block, bo);
		break;
	case FLOW_BLOCK_UNBIND:
		err = 0;
		tcf_block_unbind(block, bo);
		break;
	default:
		WARN_ON_ONCE(1);
		err = -EOPNOTSUPP;
	}

	return err;
}

/* Main classifier routine: scans the classifier chain attached
 * to this qdisc, (optionally) tests for the protocol, and asks
 * the specific classifiers.
 */
static inline int __tcf_classify(struct sk_buff *skb,
				 const struct tcf_proto *tp,
				 const struct tcf_proto *orig_tp,
				 struct tcf_result *res,
				 bool compat_mode,
				 struct tcf_exts_miss_cookie_node *n,
				 int act_index,
				 u32 *last_executed_chain)
{
#ifdef CONFIG_NET_CLS_ACT
	const int max_reclassify_loop = 16;
	const struct tcf_proto *first_tp;
	int limit = 0;

reclassify:
#endif
	for (; tp; tp = rcu_dereference_bh(tp->next)) {
		__be16 protocol = skb_protocol(skb, false);
		int err = 0;

		if (n) {
			struct tcf_exts *exts;

			if (n->tp_prio != tp->prio)
				continue;

			/* We re-lookup the tp and chain based on index instead
			 * of having hard refs and locks to them, so do a sanity
			 * check if any of tp,chain,exts was replaced by the
			 * time we got here with a cookie from hardware.
			 */
			if (unlikely(n->tp != tp || n->tp->chain != n->chain ||
				     !tp->ops->get_exts)) {
				tcf_set_drop_reason(skb,
						    SKB_DROP_REASON_TC_COOKIE_ERROR);
				return TC_ACT_SHOT;
			}

			exts = tp->ops->get_exts(tp, n->handle);
			if (unlikely(!exts || n->exts != exts)) {
				tcf_set_drop_reason(skb,
						    SKB_DROP_REASON_TC_COOKIE_ERROR);
				return TC_ACT_SHOT;
			}

			n = NULL;
			err = tcf_exts_exec_ex(skb, exts, act_index, res);
		} else {
			if (tp->protocol != protocol &&
			    tp->protocol != htons(ETH_P_ALL))
				continue;

			err = tc_classify(skb, tp, res);
		}
#ifdef CONFIG_NET_CLS_ACT
		if (unlikely(err == TC_ACT_RECLASSIFY && !compat_mode)) {
			first_tp = orig_tp;
			*last_executed_chain = first_tp->chain->index;
			goto reset;
		} else if (unlikely(TC_ACT_EXT_CMP(err, TC_ACT_GOTO_CHAIN))) {
			first_tp = res->goto_tp;
			*last_executed_chain = err & TC_ACT_EXT_VAL_MASK;
			goto reset;
		}
#endif
		if (err >= 0)
			return err;
	}

	if (unlikely(n)) {
		tcf_set_drop_reason(skb,
				    SKB_DROP_REASON_TC_COOKIE_ERROR);
		return TC_ACT_SHOT;
	}

	return TC_ACT_UNSPEC; /* signal: continue lookup */
#ifdef CONFIG_NET_CLS_ACT
reset:
	if (unlikely(limit++ >= max_reclassify_loop)) {
		net_notice_ratelimited("%u: reclassify loop, rule prio %u, protocol %02x\n",
				       tp->chain->block->index,
				       tp->prio & 0xffff,
				       ntohs(tp->protocol));
		tcf_set_drop_reason(skb,
				    SKB_DROP_REASON_TC_RECLASSIFY_LOOP);
		return TC_ACT_SHOT;
	}

	tp = first_tp;
	goto reclassify;
#endif
}
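
/* Loop-guard sketch: both TC_ACT_RECLASSIFY and TC_ACT_GOTO_CHAIN jump
 * back to the top via "reset", so a rule set that keeps reclassifying
 * (e.g. chain A goto chain B, chain B goto chain A) is cut off after
 * max_reclassify_loop (16) iterations and the packet is dropped with
 * SKB_DROP_REASON_TC_RECLASSIFY_LOOP.
 */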

int tcf_classify(struct sk_buff *skb,
		 const struct tcf_block *block,
		 const struct tcf_proto *tp,
		 struct tcf_result *res, bool compat_mode)
{
#if !IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
	u32 last_executed_chain = 0;

	return __tcf_classify(skb, tp, tp, res, compat_mode, NULL, 0,
			      &last_executed_chain);
#else
	u32 last_executed_chain = tp ? tp->chain->index : 0;
	struct tcf_exts_miss_cookie_node *n = NULL;
	const struct tcf_proto *orig_tp = tp;
	struct tc_skb_ext *ext;
	int act_index = 0;
	int ret;

	if (block) {
		ext = skb_ext_find(skb, TC_SKB_EXT);

		if (ext && (ext->chain || ext->act_miss)) {
			struct tcf_chain *fchain;
			u32 chain;

			if (ext->act_miss) {
				n = tcf_exts_miss_cookie_lookup(ext->act_miss_cookie,
								&act_index);
				if (!n) {
					tcf_set_drop_reason(skb,
							    SKB_DROP_REASON_TC_COOKIE_ERROR);
					return TC_ACT_SHOT;
				}

				chain = n->chain_index;
			} else {
				chain = ext->chain;
			}

			fchain = tcf_chain_lookup_rcu(block, chain);
			if (!fchain) {
				tcf_set_drop_reason(skb,
						    SKB_DROP_REASON_TC_CHAIN_NOTFOUND);

				return TC_ACT_SHOT;
			}

			/* Consume, so cloned/redirect skbs won't inherit ext */
			skb_ext_del(skb, TC_SKB_EXT);

			tp = rcu_dereference_bh(fchain->filter_chain);
			last_executed_chain = fchain->index;
		}
	}

	ret = __tcf_classify(skb, tp, orig_tp, res, compat_mode, n, act_index,
			     &last_executed_chain);

	if (tc_skb_ext_tc_enabled()) {
		/* If we missed on some chain */
		if (ret == TC_ACT_UNSPEC && last_executed_chain) {
			struct tc_skb_cb *cb = tc_skb_cb(skb);

			ext = tc_skb_ext_alloc(skb);
			if (WARN_ON_ONCE(!ext)) {
				tcf_set_drop_reason(skb, SKB_DROP_REASON_NOMEM);
				return TC_ACT_SHOT;
			}
			ext->chain = last_executed_chain;
			ext->mru = cb->mru;
			ext->post_ct = cb->post_ct;
			ext->post_ct_snat = cb->post_ct_snat;
			ext->post_ct_dnat = cb->post_ct_dnat;
			ext->zone = cb->zone;
		}
	}

	return ret;
#endif
}
EXPORT_SYMBOL(tcf_classify);

struct tcf_chain_info {
	struct tcf_proto __rcu **pprev;
	struct tcf_proto __rcu *next;
};

static struct tcf_proto *tcf_chain_tp_prev(struct tcf_chain *chain,
					   struct tcf_chain_info *chain_info)
{
	return tcf_chain_dereference(*chain_info->pprev, chain);
}

static int tcf_chain_tp_insert(struct tcf_chain *chain,
			       struct tcf_chain_info *chain_info,
			       struct tcf_proto *tp)
{
	if (chain->flushing)
		return -EAGAIN;

	RCU_INIT_POINTER(tp->next, tcf_chain_tp_prev(chain, chain_info));
	if (*chain_info->pprev == chain->filter_chain)
		tcf_chain0_head_change(chain, tp);
	tcf_proto_get(tp);
	rcu_assign_pointer(*chain_info->pprev, tp);

	return 0;
}

static void tcf_chain_tp_remove(struct tcf_chain *chain,
				struct tcf_chain_info *chain_info,
				struct tcf_proto *tp)
{
	struct tcf_proto *next = tcf_chain_dereference(chain_info->next, chain);

	tcf_proto_mark_delete(tp);
	if (tp == chain->filter_chain)
		tcf_chain0_head_change(chain, next);
	RCU_INIT_POINTER(*chain_info->pprev, next);
}

static struct tcf_proto *tcf_chain_tp_find(struct tcf_chain *chain,
					   struct tcf_chain_info *chain_info,
					   u32 protocol, u32 prio,
					   bool prio_allocate);

/* Try to insert a new proto.
 * If a proto with the specified priority already exists, free the new
 * proto and return the existing one.
 */

static struct tcf_proto *tcf_chain_tp_insert_unique(struct tcf_chain *chain,
						    struct tcf_proto *tp_new,
						    u32 protocol, u32 prio,
						    bool rtnl_held)
{
	struct tcf_chain_info chain_info;
	struct tcf_proto *tp;
	int err = 0;

	mutex_lock(&chain->filter_chain_lock);

	if (tcf_proto_exists_destroying(chain, tp_new)) {
		mutex_unlock(&chain->filter_chain_lock);
		tcf_proto_destroy(tp_new, rtnl_held, false, NULL);
		return ERR_PTR(-EAGAIN);
	}

	tp = tcf_chain_tp_find(chain, &chain_info,
			       protocol, prio, false);
	if (!tp)
		err = tcf_chain_tp_insert(chain, &chain_info, tp_new);
	mutex_unlock(&chain->filter_chain_lock);

	if (tp) {
		tcf_proto_destroy(tp_new, rtnl_held, false, NULL);
		tp_new = tp;
	} else if (err) {
		tcf_proto_destroy(tp_new, rtnl_held, false, NULL);
		tp_new = ERR_PTR(err);
	}

	return tp_new;
}

static void tcf_chain_tp_delete_empty(struct tcf_chain *chain,
				      struct tcf_proto *tp, bool rtnl_held,
				      struct netlink_ext_ack *extack)
{
	struct tcf_chain_info chain_info;
	struct tcf_proto *tp_iter;
	struct tcf_proto **pprev;
	struct tcf_proto *next;

	mutex_lock(&chain->filter_chain_lock);

	/* Atomically find and remove tp from chain. */
	for (pprev = &chain->filter_chain;
	     (tp_iter = tcf_chain_dereference(*pprev, chain));
	     pprev = &tp_iter->next) {
		if (tp_iter == tp) {
			chain_info.pprev = pprev;
			chain_info.next = tp_iter->next;
			WARN_ON(tp_iter->deleting);
			break;
		}
	}
	/* Verify that tp still exists and no new filters were inserted
	 * concurrently.
	 * Mark tp for deletion if it is empty.
	 */
	if (!tp_iter || !tcf_proto_check_delete(tp)) {
		mutex_unlock(&chain->filter_chain_lock);
		return;
	}

	tcf_proto_signal_destroying(chain, tp);
	next = tcf_chain_dereference(chain_info.next, chain);
	if (tp == chain->filter_chain)
		tcf_chain0_head_change(chain, next);
	RCU_INIT_POINTER(*chain_info.pprev, next);
	mutex_unlock(&chain->filter_chain_lock);

	tcf_proto_put(tp, rtnl_held, extack);
}

static struct tcf_proto *tcf_chain_tp_find(struct tcf_chain *chain,
					   struct tcf_chain_info *chain_info,
					   u32 protocol, u32 prio,
					   bool prio_allocate)
{
	struct tcf_proto **pprev;
	struct tcf_proto *tp;

	/* Check the chain for an existing tcf_proto with this priority */
	for (pprev = &chain->filter_chain;
	     (tp = tcf_chain_dereference(*pprev, chain));
	     pprev = &tp->next) {
		if (tp->prio >= prio) {
			if (tp->prio == prio) {
				if (prio_allocate ||
				    (tp->protocol != protocol && protocol))
					return ERR_PTR(-EINVAL);
			} else {
				tp = NULL;
			}
			break;
		}
	}
	chain_info->pprev = pprev;
	if (tp) {
		chain_info->next = tp->next;
		tcf_proto_get(tp);
	} else {
		chain_info->next = NULL;
	}
	return tp;
}
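
/* Return-value summary (a reading of the walk above): the chain is kept
 * sorted by ascending prio, so the scan stops at the first tp with
 * tp->prio >= prio. An exact prio hit returns that tp with a reference
 * taken (or ERR_PTR(-EINVAL) on a protocol mismatch or when the caller
 * asked to auto-allocate the prio); otherwise NULL is returned and
 * chain_info records the insertion point for tcf_chain_tp_insert().
 */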

static int tcf_fill_node(struct net *net, struct sk_buff *skb,
			 struct tcf_proto *tp, struct tcf_block *block,
			 struct Qdisc *q, u32 parent, void *fh,
			 u32 portid, u32 seq, u16 flags, int event,
			 bool terse_dump, bool rtnl_held,
			 struct netlink_ext_ack *extack)
{
	struct tcmsg *tcm;
	struct nlmsghdr *nlh;
	unsigned char *b = skb_tail_pointer(skb);

	nlh = nlmsg_put(skb, portid, seq, event, sizeof(*tcm), flags);
	if (!nlh)
		goto out_nlmsg_trim;
	tcm = nlmsg_data(nlh);
	tcm->tcm_family = AF_UNSPEC;
	tcm->tcm__pad1 = 0;
	tcm->tcm__pad2 = 0;
	if (q) {
		tcm->tcm_ifindex = qdisc_dev(q)->ifindex;
		tcm->tcm_parent = parent;
	} else {
		tcm->tcm_ifindex = TCM_IFINDEX_MAGIC_BLOCK;
		tcm->tcm_block_index = block->index;
	}
	tcm->tcm_info = TC_H_MAKE(tp->prio, tp->protocol);
	if (nla_put_string(skb, TCA_KIND, tp->ops->kind))
		goto nla_put_failure;
	if (nla_put_u32(skb, TCA_CHAIN, tp->chain->index))
		goto nla_put_failure;
	if (!fh) {
		tcm->tcm_handle = 0;
	} else if (terse_dump) {
		if (tp->ops->terse_dump) {
			if (tp->ops->terse_dump(net, tp, fh, skb, tcm,
						rtnl_held) < 0)
				goto nla_put_failure;
		} else {
			goto cls_op_not_supp;
		}
	} else {
		if (tp->ops->dump &&
		    tp->ops->dump(net, tp, fh, skb, tcm, rtnl_held) < 0)
			goto nla_put_failure;
	}

	if (extack && extack->_msg &&
	    nla_put_string(skb, TCA_EXT_WARN_MSG, extack->_msg))
		goto nla_put_failure;

	nlh->nlmsg_len = skb_tail_pointer(skb) - b;

	return skb->len;

out_nlmsg_trim:
nla_put_failure:
cls_op_not_supp:
	nlmsg_trim(skb, b);
	return -1;
}

static int tfilter_notify(struct net *net, struct sk_buff *oskb,
			  struct nlmsghdr *n, struct tcf_proto *tp,
			  struct tcf_block *block, struct Qdisc *q,
			  u32 parent, void *fh, int event, bool unicast,
			  bool rtnl_held, struct netlink_ext_ack *extack)
{
	struct sk_buff *skb;
	u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
	int err = 0;

	if (!unicast && !rtnl_notify_needed(net, n->nlmsg_flags, RTNLGRP_TC))
		return 0;

	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb)
		return -ENOBUFS;

	if (tcf_fill_node(net, skb, tp, block, q, parent, fh, portid,
			  n->nlmsg_seq, n->nlmsg_flags, event,
			  false, rtnl_held, extack) <= 0) {
		kfree_skb(skb);
		return -EINVAL;
	}

	if (unicast)
		err = rtnl_unicast(skb, net, portid);
	else
		err = rtnetlink_send(skb, net, portid, RTNLGRP_TC,
				     n->nlmsg_flags & NLM_F_ECHO);
	return err;
}
2106
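/* Delete a filter and send an RTM_DELTFILTER notification. The skb is
 * built before ->delete() is called, while fh is still valid, but is only
 * sent if the deletion succeeds. *last is set by the classifier when the
 * deleted filter was the last one in the instance.
 */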
2107static int tfilter_del_notify(struct net *net, struct sk_buff *oskb,
2108 struct nlmsghdr *n, struct tcf_proto *tp,
2109 struct tcf_block *block, struct Qdisc *q,
2110 u32 parent, void *fh, bool *last, bool rtnl_held,
2111 struct netlink_ext_ack *extack)
2112{
2113 struct sk_buff *skb;
2114 u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
2115 int err;
2116
2117 if (!rtnl_notify_needed(net, n->nlmsg_flags, RTNLGRP_TC))
2118 return tp->ops->delete(tp, fh, last, rtnl_held, extack);
2119
2120 skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
2121 if (!skb)
2122 return -ENOBUFS;
2123
2124 if (tcf_fill_node(net, skb, tp, block, q, parent, fh, portid,
2125 n->nlmsg_seq, n->nlmsg_flags, RTM_DELTFILTER,
2126 false, rtnl_held, extack) <= 0) {
2127 NL_SET_ERR_MSG(extack, "Failed to build del event notification");
2128 kfree_skb(skb);
2129 return -EINVAL;
2130 }
2131
2132 err = tp->ops->delete(tp, fh, last, rtnl_held, extack);
2133 if (err) {
2134 kfree_skb(skb);
2135 return err;
2136 }
2137
2138 err = rtnetlink_send(skb, net, portid, RTNLGRP_TC,
2139 n->nlmsg_flags & NLM_F_ECHO);
2140 if (err < 0)
2141 NL_SET_ERR_MSG(extack, "Failed to send filter delete notification");
2142
2143 return err;
2144}
2145
2146static void tfilter_notify_chain(struct net *net, struct sk_buff *oskb,
2147 struct tcf_block *block, struct Qdisc *q,
2148 u32 parent, struct nlmsghdr *n,
2149 struct tcf_chain *chain, int event,
2150 struct netlink_ext_ack *extack)
2151{
2152 struct tcf_proto *tp;
2153
2154 for (tp = tcf_get_next_proto(chain, NULL);
2155 tp; tp = tcf_get_next_proto(chain, tp))
2156 tfilter_notify(net, oskb, n, tp, block, q, parent, NULL,
2157 event, false, true, extack);
2158}
2159
2160static void tfilter_put(struct tcf_proto *tp, void *fh)
2161{
2162 if (tp->ops->put && fh)
2163 tp->ops->put(tp, fh);
2164}
2165
2166static bool is_qdisc_ingress(__u32 classid)
2167{
2168 return (TC_H_MIN(classid) == TC_H_MIN(TC_H_MIN_INGRESS));
2169}
2170
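/* Add or change a filter (RTM_NEWTFILTER). Runs without rtnl where the
 * qdisc and classifier allow it and takes rtnl otherwise. On -EAGAIN, e.g.
 * due to a concurrent chain flush, the request is replayed under rtnl.
 */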
2171static int tc_new_tfilter(struct sk_buff *skb, struct nlmsghdr *n,
2172 struct netlink_ext_ack *extack)
2173{
2174 struct net *net = sock_net(skb->sk);
2175 struct nlattr *tca[TCA_MAX + 1];
2176 char name[IFNAMSIZ];
2177 struct tcmsg *t;
2178 u32 protocol;
2179 u32 prio;
2180 bool prio_allocate;
2181 u32 parent;
2182 u32 chain_index;
2183 struct Qdisc *q;
2184 struct tcf_chain_info chain_info;
2185 struct tcf_chain *chain;
2186 struct tcf_block *block;
2187 struct tcf_proto *tp;
2188 unsigned long cl;
2189 void *fh;
2190 int err;
2191 int tp_created;
2192 bool rtnl_held = false;
2193 u32 flags;
2194
2195replay:
2196 tp_created = 0;
2197
2198 err = nlmsg_parse_deprecated(n, sizeof(*t), tca, TCA_MAX,
2199 rtm_tca_policy, extack);
2200 if (err < 0)
2201 return err;
2202
2203 t = nlmsg_data(n);
2204 protocol = TC_H_MIN(t->tcm_info);
2205 prio = TC_H_MAJ(t->tcm_info);
2206 prio_allocate = false;
2207 parent = t->tcm_parent;
2208 tp = NULL;
2209 cl = 0;
2210 block = NULL;
2211 q = NULL;
2212 chain = NULL;
2213 flags = 0;
2214
2215 if (prio == 0) {
2216 /* If no priority is provided by the user,
2217 * we allocate one.
2218 */
2219 if (n->nlmsg_flags & NLM_F_CREATE) {
2220 prio = TC_H_MAKE(0x80000000U, 0U);
2221 prio_allocate = true;
2222 } else {
2223 NL_SET_ERR_MSG(extack, "Invalid filter command with priority of zero");
2224 return -ENOENT;
2225 }
2226 }
2227
2228 /* Find head of filter chain. */
2229
2230 err = __tcf_qdisc_find(net, &q, &parent, t->tcm_ifindex, false, extack);
2231 if (err)
2232 return err;
2233
2234 if (tcf_proto_check_kind(tca[TCA_KIND], name)) {
2235 NL_SET_ERR_MSG(extack, "Specified TC filter name too long");
2236 err = -EINVAL;
2237 goto errout;
2238 }
2239
2240 /* Take the rtnl mutex if rtnl_held was set on the previous iteration,
2241 * the block is shared (no qdisc found), the qdisc is not unlocked, the
2242 * classifier type is not specified, or the classifier is not unlocked.
2243 */
2244 if (rtnl_held ||
2245 (q && !(q->ops->cl_ops->flags & QDISC_CLASS_OPS_DOIT_UNLOCKED)) ||
2246 !tcf_proto_is_unlocked(name)) {
2247 rtnl_held = true;
2248 rtnl_lock();
2249 }
2250
2251 err = __tcf_qdisc_cl_find(q, parent, &cl, t->tcm_ifindex, extack);
2252 if (err)
2253 goto errout;
2254
2255 block = __tcf_block_find(net, q, cl, t->tcm_ifindex, t->tcm_block_index,
2256 extack);
2257 if (IS_ERR(block)) {
2258 err = PTR_ERR(block);
2259 goto errout;
2260 }
2261 block->classid = parent;
2262
2263 chain_index = tca[TCA_CHAIN] ? nla_get_u32(tca[TCA_CHAIN]) : 0;
2264 if (chain_index > TC_ACT_EXT_VAL_MASK) {
2265 NL_SET_ERR_MSG(extack, "Specified chain index exceeds upper limit");
2266 err = -EINVAL;
2267 goto errout;
2268 }
2269 chain = tcf_chain_get(block, chain_index, true);
2270 if (!chain) {
2271 NL_SET_ERR_MSG(extack, "Cannot create specified filter chain");
2272 err = -ENOMEM;
2273 goto errout;
2274 }
2275
2276 mutex_lock(&chain->filter_chain_lock);
2277 tp = tcf_chain_tp_find(chain, &chain_info, protocol,
2278 prio, prio_allocate);
2279 if (IS_ERR(tp)) {
2280 NL_SET_ERR_MSG(extack, "Filter with specified priority/protocol not found");
2281 err = PTR_ERR(tp);
2282 goto errout_locked;
2283 }
2284
2285 if (tp == NULL) {
2286 struct tcf_proto *tp_new = NULL;
2287
2288 if (chain->flushing) {
2289 err = -EAGAIN;
2290 goto errout_locked;
2291 }
2292
2293 /* Proto-tcf does not exist, create a new one */
2294
2295 if (tca[TCA_KIND] == NULL || !protocol) {
2296 NL_SET_ERR_MSG(extack, "Filter kind and protocol must be specified");
2297 err = -EINVAL;
2298 goto errout_locked;
2299 }
2300
2301 if (!(n->nlmsg_flags & NLM_F_CREATE)) {
2302 NL_SET_ERR_MSG(extack, "Need both RTM_NEWTFILTER and NLM_F_CREATE to create a new filter");
2303 err = -ENOENT;
2304 goto errout_locked;
2305 }
2306
2307 if (prio_allocate)
2308 prio = tcf_auto_prio(tcf_chain_tp_prev(chain,
2309 &chain_info));
2310
2311 mutex_unlock(&chain->filter_chain_lock);
2312 tp_new = tcf_proto_create(name, protocol, prio, chain,
2313 rtnl_held, extack);
2314 if (IS_ERR(tp_new)) {
2315 err = PTR_ERR(tp_new);
2316 goto errout_tp;
2317 }
2318
2319 tp_created = 1;
2320 tp = tcf_chain_tp_insert_unique(chain, tp_new, protocol, prio,
2321 rtnl_held);
2322 if (IS_ERR(tp)) {
2323 err = PTR_ERR(tp);
2324 goto errout_tp;
2325 }
2326 } else {
2327 mutex_unlock(&chain->filter_chain_lock);
2328 }
2329
2330 if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], tp->ops->kind)) {
2331 NL_SET_ERR_MSG(extack, "Specified filter kind does not match existing one");
2332 err = -EINVAL;
2333 goto errout;
2334 }
2335
2336 fh = tp->ops->get(tp, t->tcm_handle);
2337
2338 if (!fh) {
2339 if (!(n->nlmsg_flags & NLM_F_CREATE)) {
2340 NL_SET_ERR_MSG(extack, "Need both RTM_NEWTFILTER and NLM_F_CREATE to create a new filter");
2341 err = -ENOENT;
2342 goto errout;
2343 }
2344 } else if (n->nlmsg_flags & NLM_F_EXCL) {
2345 tfilter_put(tp, fh);
2346 NL_SET_ERR_MSG(extack, "Filter already exists");
2347 err = -EEXIST;
2348 goto errout;
2349 }
2350
2351 if (chain->tmplt_ops && chain->tmplt_ops != tp->ops) {
2352 tfilter_put(tp, fh);
2353 NL_SET_ERR_MSG(extack, "Chain template is set to a different filter kind");
2354 err = -EINVAL;
2355 goto errout;
2356 }
2357
2358 if (!(n->nlmsg_flags & NLM_F_CREATE))
2359 flags |= TCA_ACT_FLAGS_REPLACE;
2360 if (!rtnl_held)
2361 flags |= TCA_ACT_FLAGS_NO_RTNL;
2362 if (is_qdisc_ingress(parent))
2363 flags |= TCA_ACT_FLAGS_AT_INGRESS;
2364 err = tp->ops->change(net, skb, tp, cl, t->tcm_handle, tca, &fh,
2365 flags, extack);
2366 if (err == 0) {
2367 tfilter_notify(net, skb, n, tp, block, q, parent, fh,
2368 RTM_NEWTFILTER, false, rtnl_held, extack);
2369 tfilter_put(tp, fh);
2370 /* q pointer is NULL for shared blocks */
2371 if (q)
2372 q->flags &= ~TCQ_F_CAN_BYPASS;
2373 }
2374
2375errout:
2376 if (err && tp_created)
2377 tcf_chain_tp_delete_empty(chain, tp, rtnl_held, NULL);
2378errout_tp:
2379 if (chain) {
2380 if (tp && !IS_ERR(tp))
2381 tcf_proto_put(tp, rtnl_held, NULL);
2382 if (!tp_created)
2383 tcf_chain_put(chain);
2384 }
2385 tcf_block_release(q, block, rtnl_held);
2386
2387 if (rtnl_held)
2388 rtnl_unlock();
2389
2390 if (err == -EAGAIN) {
2391 /* Take rtnl lock in case EAGAIN is caused by concurrent flush
2392 * of target chain.
2393 */
2394 rtnl_held = true;
2395 /* Replay the request. */
2396 goto replay;
2397 }
2398 return err;
2399
2400errout_locked:
2401 mutex_unlock(&chain->filter_chain_lock);
2402 goto errout;
2403}
2404
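/* Delete a filter (RTM_DELTFILTER). A priority of zero flushes the whole
 * chain, a handle of zero removes the whole classifier instance, otherwise
 * a single filter is deleted.
 */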
2405static int tc_del_tfilter(struct sk_buff *skb, struct nlmsghdr *n,
2406 struct netlink_ext_ack *extack)
2407{
2408 struct net *net = sock_net(skb->sk);
2409 struct nlattr *tca[TCA_MAX + 1];
2410 char name[IFNAMSIZ];
2411 struct tcmsg *t;
2412 u32 protocol;
2413 u32 prio;
2414 u32 parent;
2415 u32 chain_index;
2416 struct Qdisc *q = NULL;
2417 struct tcf_chain_info chain_info;
2418 struct tcf_chain *chain = NULL;
2419 struct tcf_block *block = NULL;
2420 struct tcf_proto *tp = NULL;
2421 unsigned long cl = 0;
2422 void *fh = NULL;
2423 int err;
2424 bool rtnl_held = false;
2425
2426 err = nlmsg_parse_deprecated(n, sizeof(*t), tca, TCA_MAX,
2427 rtm_tca_policy, extack);
2428 if (err < 0)
2429 return err;
2430
2431 t = nlmsg_data(n);
2432 protocol = TC_H_MIN(t->tcm_info);
2433 prio = TC_H_MAJ(t->tcm_info);
2434 parent = t->tcm_parent;
2435
2436 if (prio == 0 && (protocol || t->tcm_handle || tca[TCA_KIND])) {
2437 NL_SET_ERR_MSG(extack, "Cannot flush filters with protocol, handle or kind set");
2438 return -ENOENT;
2439 }
2440
2441 /* Find head of filter chain. */
2442
2443 err = __tcf_qdisc_find(net, &q, &parent, t->tcm_ifindex, false, extack);
2444 if (err)
2445 return err;
2446
2447 if (tcf_proto_check_kind(tca[TCA_KIND], name)) {
2448 NL_SET_ERR_MSG(extack, "Specified TC filter name too long");
2449 err = -EINVAL;
2450 goto errout;
2451 }
2452 /* Take the rtnl mutex if flushing the whole chain, the block is shared
2453 * (no qdisc found), the qdisc is not unlocked, the classifier type is
2454 * not specified, or the classifier is not unlocked.
2455 */
2456 if (!prio ||
2457 (q && !(q->ops->cl_ops->flags & QDISC_CLASS_OPS_DOIT_UNLOCKED)) ||
2458 !tcf_proto_is_unlocked(name)) {
2459 rtnl_held = true;
2460 rtnl_lock();
2461 }
2462
2463 err = __tcf_qdisc_cl_find(q, parent, &cl, t->tcm_ifindex, extack);
2464 if (err)
2465 goto errout;
2466
2467 block = __tcf_block_find(net, q, cl, t->tcm_ifindex, t->tcm_block_index,
2468 extack);
2469 if (IS_ERR(block)) {
2470 err = PTR_ERR(block);
2471 goto errout;
2472 }
2473
2474 chain_index = tca[TCA_CHAIN] ? nla_get_u32(tca[TCA_CHAIN]) : 0;
2475 if (chain_index > TC_ACT_EXT_VAL_MASK) {
2476 NL_SET_ERR_MSG(extack, "Specified chain index exceeds upper limit");
2477 err = -EINVAL;
2478 goto errout;
2479 }
2480 chain = tcf_chain_get(block, chain_index, false);
2481 if (!chain) {
2482 /* User requested flush on non-existent chain. Nothing to do,
2483 * so just return success.
2484 */
2485 if (prio == 0) {
2486 err = 0;
2487 goto errout;
2488 }
2489 NL_SET_ERR_MSG(extack, "Cannot find specified filter chain");
2490 err = -ENOENT;
2491 goto errout;
2492 }
2493
2494 if (prio == 0) {
2495 tfilter_notify_chain(net, skb, block, q, parent, n,
2496 chain, RTM_DELTFILTER, extack);
2497 tcf_chain_flush(chain, rtnl_held);
2498 err = 0;
2499 goto errout;
2500 }
2501
2502 mutex_lock(&chain->filter_chain_lock);
2503 tp = tcf_chain_tp_find(chain, &chain_info, protocol,
2504 prio, false);
2505 if (!tp || IS_ERR(tp)) {
2506 NL_SET_ERR_MSG(extack, "Filter with specified priority/protocol not found");
2507 err = tp ? PTR_ERR(tp) : -ENOENT;
2508 goto errout_locked;
2509 } else if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], tp->ops->kind)) {
2510 NL_SET_ERR_MSG(extack, "Specified filter kind does not match existing one");
2511 err = -EINVAL;
2512 goto errout_locked;
2513 } else if (t->tcm_handle == 0) {
2514 tcf_proto_signal_destroying(chain, tp);
2515 tcf_chain_tp_remove(chain, &chain_info, tp);
2516 mutex_unlock(&chain->filter_chain_lock);
2517
2518 tcf_proto_put(tp, rtnl_held, NULL);
2519 tfilter_notify(net, skb, n, tp, block, q, parent, fh,
2520 RTM_DELTFILTER, false, rtnl_held, extack);
2521 err = 0;
2522 goto errout;
2523 }
2524 mutex_unlock(&chain->filter_chain_lock);
2525
2526 fh = tp->ops->get(tp, t->tcm_handle);
2527
2528 if (!fh) {
2529 NL_SET_ERR_MSG(extack, "Specified filter handle not found");
2530 err = -ENOENT;
2531 } else {
2532 bool last;
2533
2534 err = tfilter_del_notify(net, skb, n, tp, block, q, parent, fh,
2535 &last, rtnl_held, extack);
2536
2537 if (err)
2538 goto errout;
2539 if (last)
2540 tcf_chain_tp_delete_empty(chain, tp, rtnl_held, extack);
2541 }
2542
2543errout:
2544 if (chain) {
2545 if (tp && !IS_ERR(tp))
2546 tcf_proto_put(tp, rtnl_held, NULL);
2547 tcf_chain_put(chain);
2548 }
2549 tcf_block_release(q, block, rtnl_held);
2550
2551 if (rtnl_held)
2552 rtnl_unlock();
2553
2554 return err;
2555
2556errout_locked:
2557 mutex_unlock(&chain->filter_chain_lock);
2558 goto errout;
2559}
2560
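/* Get a single filter (RTM_GETTFILTER) and send it back to the requester
 * as a unicast RTM_NEWTFILTER message.
 */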
2561static int tc_get_tfilter(struct sk_buff *skb, struct nlmsghdr *n,
2562 struct netlink_ext_ack *extack)
2563{
2564 struct net *net = sock_net(skb->sk);
2565 struct nlattr *tca[TCA_MAX + 1];
2566 char name[IFNAMSIZ];
2567 struct tcmsg *t;
2568 u32 protocol;
2569 u32 prio;
2570 u32 parent;
2571 u32 chain_index;
2572 struct Qdisc *q = NULL;
2573 struct tcf_chain_info chain_info;
2574 struct tcf_chain *chain = NULL;
2575 struct tcf_block *block = NULL;
2576 struct tcf_proto *tp = NULL;
2577 unsigned long cl = 0;
2578 void *fh = NULL;
2579 int err;
2580 bool rtnl_held = false;
2581
2582 err = nlmsg_parse_deprecated(n, sizeof(*t), tca, TCA_MAX,
2583 rtm_tca_policy, extack);
2584 if (err < 0)
2585 return err;
2586
2587 t = nlmsg_data(n);
2588 protocol = TC_H_MIN(t->tcm_info);
2589 prio = TC_H_MAJ(t->tcm_info);
2590 parent = t->tcm_parent;
2591
2592 if (prio == 0) {
2593 NL_SET_ERR_MSG(extack, "Invalid filter command with priority of zero");
2594 return -ENOENT;
2595 }
2596
2597 /* Find head of filter chain. */
2598
2599 err = __tcf_qdisc_find(net, &q, &parent, t->tcm_ifindex, false, extack);
2600 if (err)
2601 return err;
2602
2603 if (tcf_proto_check_kind(tca[TCA_KIND], name)) {
2604 NL_SET_ERR_MSG(extack, "Specified TC filter name too long");
2605 err = -EINVAL;
2606 goto errout;
2607 }
2608 /* Take the rtnl mutex if the block is shared (no qdisc found), the qdisc
2609 * is not unlocked, the classifier type is not specified, or the
2610 * classifier is not unlocked.
2611 */
2612 if ((q && !(q->ops->cl_ops->flags & QDISC_CLASS_OPS_DOIT_UNLOCKED)) ||
2613 !tcf_proto_is_unlocked(name)) {
2614 rtnl_held = true;
2615 rtnl_lock();
2616 }
2617
2618 err = __tcf_qdisc_cl_find(q, parent, &cl, t->tcm_ifindex, extack);
2619 if (err)
2620 goto errout;
2621
2622 block = __tcf_block_find(net, q, cl, t->tcm_ifindex, t->tcm_block_index,
2623 extack);
2624 if (IS_ERR(block)) {
2625 err = PTR_ERR(block);
2626 goto errout;
2627 }
2628
2629 chain_index = tca[TCA_CHAIN] ? nla_get_u32(tca[TCA_CHAIN]) : 0;
2630 if (chain_index > TC_ACT_EXT_VAL_MASK) {
2631 NL_SET_ERR_MSG(extack, "Specified chain index exceeds upper limit");
2632 err = -EINVAL;
2633 goto errout;
2634 }
2635 chain = tcf_chain_get(block, chain_index, false);
2636 if (!chain) {
2637 NL_SET_ERR_MSG(extack, "Cannot find specified filter chain");
2638 err = -EINVAL;
2639 goto errout;
2640 }
2641
2642 mutex_lock(&chain->filter_chain_lock);
2643 tp = tcf_chain_tp_find(chain, &chain_info, protocol,
2644 prio, false);
2645 mutex_unlock(&chain->filter_chain_lock);
2646 if (!tp || IS_ERR(tp)) {
2647 NL_SET_ERR_MSG(extack, "Filter with specified priority/protocol not found");
2648 err = tp ? PTR_ERR(tp) : -ENOENT;
2649 goto errout;
2650 } else if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], tp->ops->kind)) {
2651 NL_SET_ERR_MSG(extack, "Specified filter kind does not match existing one");
2652 err = -EINVAL;
2653 goto errout;
2654 }
2655
2656 fh = tp->ops->get(tp, t->tcm_handle);
2657
2658 if (!fh) {
2659 NL_SET_ERR_MSG(extack, "Specified filter handle not found");
2660 err = -ENOENT;
2661 } else {
2662 err = tfilter_notify(net, skb, n, tp, block, q, parent,
2663 fh, RTM_NEWTFILTER, true, rtnl_held, NULL);
2664 if (err < 0)
2665 NL_SET_ERR_MSG(extack, "Failed to send filter notify message");
2666 }
2667
2668 tfilter_put(tp, fh);
2669errout:
2670 if (chain) {
2671 if (tp && !IS_ERR(tp))
2672 tcf_proto_put(tp, rtnl_held, NULL);
2673 tcf_chain_put(chain);
2674 }
2675 tcf_block_release(q, block, rtnl_held);
2676
2677 if (rtnl_held)
2678 rtnl_unlock();
2679
2680 return err;
2681}
2682
2683struct tcf_dump_args {
2684 struct tcf_walker w;
2685 struct sk_buff *skb;
2686 struct netlink_callback *cb;
2687 struct tcf_block *block;
2688 struct Qdisc *q;
2689 u32 parent;
2690 bool terse_dump;
2691};
2692
2693static int tcf_node_dump(struct tcf_proto *tp, void *n, struct tcf_walker *arg)
2694{
2695 struct tcf_dump_args *a = (void *)arg;
2696 struct net *net = sock_net(a->skb->sk);
2697
2698 return tcf_fill_node(net, a->skb, tp, a->block, a->q, a->parent,
2699 n, NETLINK_CB(a->cb->skb).portid,
2700 a->cb->nlh->nlmsg_seq, NLM_F_MULTI,
2701 RTM_NEWTFILTER, a->terse_dump, true, NULL);
2702}
2703
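/* Dump all filters on a chain, resuming from the position recorded in
 * cb->args. Returns false if the walk stopped because the skb is full,
 * true once the chain has been dumped completely.
 */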
2704static bool tcf_chain_dump(struct tcf_chain *chain, struct Qdisc *q, u32 parent,
2705 struct sk_buff *skb, struct netlink_callback *cb,
2706 long index_start, long *p_index, bool terse)
2707{
2708 struct net *net = sock_net(skb->sk);
2709 struct tcf_block *block = chain->block;
2710 struct tcmsg *tcm = nlmsg_data(cb->nlh);
2711 struct tcf_proto *tp, *tp_prev;
2712 struct tcf_dump_args arg;
2713
2714 for (tp = __tcf_get_next_proto(chain, NULL);
2715 tp;
2716 tp_prev = tp,
2717 tp = __tcf_get_next_proto(chain, tp),
2718 tcf_proto_put(tp_prev, true, NULL),
2719 (*p_index)++) {
2720 if (*p_index < index_start)
2721 continue;
2722 if (TC_H_MAJ(tcm->tcm_info) &&
2723 TC_H_MAJ(tcm->tcm_info) != tp->prio)
2724 continue;
2725 if (TC_H_MIN(tcm->tcm_info) &&
2726 TC_H_MIN(tcm->tcm_info) != tp->protocol)
2727 continue;
2728 if (*p_index > index_start)
2729 memset(&cb->args[1], 0,
2730 sizeof(cb->args) - sizeof(cb->args[0]));
2731 if (cb->args[1] == 0) {
2732 if (tcf_fill_node(net, skb, tp, block, q, parent, NULL,
2733 NETLINK_CB(cb->skb).portid,
2734 cb->nlh->nlmsg_seq, NLM_F_MULTI,
2735 RTM_NEWTFILTER, false, true, NULL) <= 0)
2736 goto errout;
2737 cb->args[1] = 1;
2738 }
2739 if (!tp->ops->walk)
2740 continue;
2741 arg.w.fn = tcf_node_dump;
2742 arg.skb = skb;
2743 arg.cb = cb;
2744 arg.block = block;
2745 arg.q = q;
2746 arg.parent = parent;
2747 arg.w.stop = 0;
2748 arg.w.skip = cb->args[1] - 1;
2749 arg.w.count = 0;
2750 arg.w.cookie = cb->args[2];
2751 arg.terse_dump = terse;
2752 tp->ops->walk(tp, &arg.w, true);
2753 cb->args[2] = arg.w.cookie;
2754 cb->args[1] = arg.w.count + 1;
2755 if (arg.w.stop)
2756 goto errout;
2757 }
2758 return true;
2759
2760errout:
2761 tcf_proto_put(tp, true, NULL);
2762 return false;
2763}
2764
2765static const struct nla_policy tcf_tfilter_dump_policy[TCA_MAX + 1] = {
2766 [TCA_CHAIN] = { .type = NLA_U32 },
2767 [TCA_DUMP_FLAGS] = NLA_POLICY_BITFIELD32(TCA_DUMP_FLAGS_TERSE),
2768};
2769
2770/* called with RTNL */
2771static int tc_dump_tfilter(struct sk_buff *skb, struct netlink_callback *cb)
2772{
2773 struct tcf_chain *chain, *chain_prev;
2774 struct net *net = sock_net(skb->sk);
2775 struct nlattr *tca[TCA_MAX + 1];
2776 struct Qdisc *q = NULL;
2777 struct tcf_block *block;
2778 struct tcmsg *tcm = nlmsg_data(cb->nlh);
2779 bool terse_dump = false;
2780 long index_start;
2781 long index;
2782 u32 parent;
2783 int err;
2784
2785 if (nlmsg_len(cb->nlh) < sizeof(*tcm))
2786 return skb->len;
2787
2788 err = nlmsg_parse_deprecated(cb->nlh, sizeof(*tcm), tca, TCA_MAX,
2789 tcf_tfilter_dump_policy, cb->extack);
2790 if (err)
2791 return err;
2792
2793 if (tca[TCA_DUMP_FLAGS]) {
2794 struct nla_bitfield32 flags =
2795 nla_get_bitfield32(tca[TCA_DUMP_FLAGS]);
2796
2797 terse_dump = flags.value & TCA_DUMP_FLAGS_TERSE;
2798 }
2799
2800 if (tcm->tcm_ifindex == TCM_IFINDEX_MAGIC_BLOCK) {
2801 block = tcf_block_refcnt_get(net, tcm->tcm_block_index);
2802 if (!block)
2803 goto out;
2804 /* If we work with block index, q is NULL and parent value
2805 * will never be used in the following code. The check
2806 * in tcf_fill_node prevents it. However, compiler does not
2807 * see that far, so set parent to zero to silence the warning
2808 * about parent being uninitialized.
2809 */
2810 parent = 0;
2811 } else {
2812 const struct Qdisc_class_ops *cops;
2813 struct net_device *dev;
2814 unsigned long cl = 0;
2815
2816 dev = __dev_get_by_index(net, tcm->tcm_ifindex);
2817 if (!dev)
2818 return skb->len;
2819
2820 parent = tcm->tcm_parent;
2821 if (!parent)
2822 q = rtnl_dereference(dev->qdisc);
2823 else
2824 q = qdisc_lookup(dev, TC_H_MAJ(tcm->tcm_parent));
2825 if (!q)
2826 goto out;
2827 cops = q->ops->cl_ops;
2828 if (!cops)
2829 goto out;
2830 if (!cops->tcf_block)
2831 goto out;
2832 if (TC_H_MIN(tcm->tcm_parent)) {
2833 cl = cops->find(q, tcm->tcm_parent);
2834 if (cl == 0)
2835 goto out;
2836 }
2837 block = cops->tcf_block(q, cl, NULL);
2838 if (!block)
2839 goto out;
2840 parent = block->classid;
2841 if (tcf_block_shared(block))
2842 q = NULL;
2843 }
2844
2845 index_start = cb->args[0];
2846 index = 0;
2847
2848 for (chain = __tcf_get_next_chain(block, NULL);
2849 chain;
2850 chain_prev = chain,
2851 chain = __tcf_get_next_chain(block, chain),
2852 tcf_chain_put(chain_prev)) {
2853 if (tca[TCA_CHAIN] &&
2854 nla_get_u32(tca[TCA_CHAIN]) != chain->index)
2855 continue;
2856 if (!tcf_chain_dump(chain, q, parent, skb, cb,
2857 index_start, &index, terse_dump)) {
2858 tcf_chain_put(chain);
2859 err = -EMSGSIZE;
2860 break;
2861 }
2862 }
2863
2864 if (tcm->tcm_ifindex == TCM_IFINDEX_MAGIC_BLOCK)
2865 tcf_block_refcnt_put(block, true);
2866 cb->args[0] = index;
2867
2868out:
2869 /* If we made no progress, the error (EMSGSIZE) is real */
2870 if (skb->len == 0 && err)
2871 return err;
2872 return skb->len;
2873}
2874
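/* Fill a netlink message describing a chain, including the template kind
 * and template payload when a template is set on the chain.
 */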
2875static int tc_chain_fill_node(const struct tcf_proto_ops *tmplt_ops,
2876 void *tmplt_priv, u32 chain_index,
2877 struct net *net, struct sk_buff *skb,
2878 struct tcf_block *block,
2879 u32 portid, u32 seq, u16 flags, int event,
2880 struct netlink_ext_ack *extack)
2881{
2882 unsigned char *b = skb_tail_pointer(skb);
2883 const struct tcf_proto_ops *ops;
2884 struct nlmsghdr *nlh;
2885 struct tcmsg *tcm;
2886 void *priv;
2887
2888 ops = tmplt_ops;
2889 priv = tmplt_priv;
2890
2891 nlh = nlmsg_put(skb, portid, seq, event, sizeof(*tcm), flags);
2892 if (!nlh)
2893 goto out_nlmsg_trim;
2894 tcm = nlmsg_data(nlh);
2895 tcm->tcm_family = AF_UNSPEC;
2896 tcm->tcm__pad1 = 0;
2897 tcm->tcm__pad2 = 0;
2898 tcm->tcm_handle = 0;
2899 if (block->q) {
2900 tcm->tcm_ifindex = qdisc_dev(block->q)->ifindex;
2901 tcm->tcm_parent = block->q->handle;
2902 } else {
2903 tcm->tcm_ifindex = TCM_IFINDEX_MAGIC_BLOCK;
2904 tcm->tcm_block_index = block->index;
2905 }
2906
2907 if (nla_put_u32(skb, TCA_CHAIN, chain_index))
2908 goto nla_put_failure;
2909
2910 if (ops) {
2911 if (nla_put_string(skb, TCA_KIND, ops->kind))
2912 goto nla_put_failure;
2913 if (ops->tmplt_dump(skb, net, priv) < 0)
2914 goto nla_put_failure;
2915 }
2916
2917 if (extack && extack->_msg &&
2918 nla_put_string(skb, TCA_EXT_WARN_MSG, extack->_msg))
2919 goto out_nlmsg_trim;
2920
2921 nlh->nlmsg_len = skb_tail_pointer(skb) - b;
2922
2923 return skb->len;
2924
2925out_nlmsg_trim:
2926nla_put_failure:
2927 nlmsg_trim(skb, b);
2928 return -EMSGSIZE;
2929}
2930
2931static int tc_chain_notify(struct tcf_chain *chain, struct sk_buff *oskb,
2932 u32 seq, u16 flags, int event, bool unicast,
2933 struct netlink_ext_ack *extack)
2934{
2935 u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
2936 struct tcf_block *block = chain->block;
2937 struct net *net = block->net;
2938 struct sk_buff *skb;
2939 int err = 0;
2940
2941 if (!unicast && !rtnl_notify_needed(net, flags, RTNLGRP_TC))
2942 return 0;
2943
2944 skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
2945 if (!skb)
2946 return -ENOBUFS;
2947
2948 if (tc_chain_fill_node(chain->tmplt_ops, chain->tmplt_priv,
2949 chain->index, net, skb, block, portid,
2950 seq, flags, event, extack) <= 0) {
2951 kfree_skb(skb);
2952 return -EINVAL;
2953 }
2954
2955 if (unicast)
2956 err = rtnl_unicast(skb, net, portid);
2957 else
2958 err = rtnetlink_send(skb, net, portid, RTNLGRP_TC,
2959 flags & NLM_F_ECHO);
2960
2961 return err;
2962}
2963
2964static int tc_chain_notify_delete(const struct tcf_proto_ops *tmplt_ops,
2965 void *tmplt_priv, u32 chain_index,
2966 struct tcf_block *block, struct sk_buff *oskb,
2967 u32 seq, u16 flags)
2968{
2969 u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
2970 struct net *net = block->net;
2971 struct sk_buff *skb;
2972
2973 if (!rtnl_notify_needed(net, flags, RTNLGRP_TC))
2974 return 0;
2975
2976 skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
2977 if (!skb)
2978 return -ENOBUFS;
2979
2980 if (tc_chain_fill_node(tmplt_ops, tmplt_priv, chain_index, net, skb,
2981 block, portid, seq, flags, RTM_DELCHAIN, NULL) <= 0) {
2982 kfree_skb(skb);
2983 return -EINVAL;
2984 }
2985
2986 return rtnetlink_send(skb, net, portid, RTNLGRP_TC, flags & NLM_F_ECHO);
2987}
2988
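/* Attach a filter template to a chain. The classifier named in TCA_KIND
 * must implement the full set of template ops; its ->tmplt_create() result
 * is stored on the chain as private template state.
 */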
2989static int tc_chain_tmplt_add(struct tcf_chain *chain, struct net *net,
2990 struct nlattr **tca,
2991 struct netlink_ext_ack *extack)
2992{
2993 const struct tcf_proto_ops *ops;
2994 char name[IFNAMSIZ];
2995 void *tmplt_priv;
2996
2997 /* If kind is not set, the user did not specify a template. */
2998 if (!tca[TCA_KIND])
2999 return 0;
3000
3001 if (tcf_proto_check_kind(tca[TCA_KIND], name)) {
3002 NL_SET_ERR_MSG(extack, "Specified TC chain template name too long");
3003 return -EINVAL;
3004 }
3005
3006 ops = tcf_proto_lookup_ops(name, true, extack);
3007 if (IS_ERR(ops))
3008 return PTR_ERR(ops);
3009 if (!ops->tmplt_create || !ops->tmplt_destroy || !ops->tmplt_dump ||
3010 !ops->tmplt_reoffload) {
3011 NL_SET_ERR_MSG(extack, "Chain templates are not supported with specified classifier");
3012 module_put(ops->owner);
3013 return -EOPNOTSUPP;
3014 }
3015
3016 tmplt_priv = ops->tmplt_create(net, chain, tca, extack);
3017 if (IS_ERR(tmplt_priv)) {
3018 module_put(ops->owner);
3019 return PTR_ERR(tmplt_priv);
3020 }
3021 chain->tmplt_ops = ops;
3022 chain->tmplt_priv = tmplt_priv;
3023 return 0;
3024}
3025
3026static void tc_chain_tmplt_del(const struct tcf_proto_ops *tmplt_ops,
3027 void *tmplt_priv)
3028{
3029 /* If template ops are not set, there is no template to destroy. */
3030 if (!tmplt_ops)
3031 return;
3032
3033 tmplt_ops->tmplt_destroy(tmplt_priv);
3034 module_put(tmplt_ops->owner);
3035}
3036
3037/* Add/delete/get a chain */
3038
3039static int tc_ctl_chain(struct sk_buff *skb, struct nlmsghdr *n,
3040 struct netlink_ext_ack *extack)
3041{
3042 struct net *net = sock_net(skb->sk);
3043 struct nlattr *tca[TCA_MAX + 1];
3044 struct tcmsg *t;
3045 u32 parent;
3046 u32 chain_index;
3047 struct Qdisc *q;
3048 struct tcf_chain *chain;
3049 struct tcf_block *block;
3050 unsigned long cl;
3051 int err;
3052
3053replay:
3054 q = NULL;
3055 err = nlmsg_parse_deprecated(n, sizeof(*t), tca, TCA_MAX,
3056 rtm_tca_policy, extack);
3057 if (err < 0)
3058 return err;
3059
3060 t = nlmsg_data(n);
3061 parent = t->tcm_parent;
3062 cl = 0;
3063
3064 block = tcf_block_find(net, &q, &parent, &cl,
3065 t->tcm_ifindex, t->tcm_block_index, extack);
3066 if (IS_ERR(block))
3067 return PTR_ERR(block);
3068
3069 chain_index = tca[TCA_CHAIN] ? nla_get_u32(tca[TCA_CHAIN]) : 0;
3070 if (chain_index > TC_ACT_EXT_VAL_MASK) {
3071 NL_SET_ERR_MSG(extack, "Specified chain index exceeds upper limit");
3072 err = -EINVAL;
3073 goto errout_block;
3074 }
3075
3076 mutex_lock(&block->lock);
3077 chain = tcf_chain_lookup(block, chain_index);
3078 if (n->nlmsg_type == RTM_NEWCHAIN) {
3079 if (chain) {
3080 if (tcf_chain_held_by_acts_only(chain)) {
3081 /* The chain exists only because there is
3082 * some action referencing it.
3083 */
3084 tcf_chain_hold(chain);
3085 } else {
3086 NL_SET_ERR_MSG(extack, "Filter chain already exists");
3087 err = -EEXIST;
3088 goto errout_block_locked;
3089 }
3090 } else {
3091 if (!(n->nlmsg_flags & NLM_F_CREATE)) {
3092 NL_SET_ERR_MSG(extack, "Need both RTM_NEWCHAIN and NLM_F_CREATE to create a new chain");
3093 err = -ENOENT;
3094 goto errout_block_locked;
3095 }
3096 chain = tcf_chain_create(block, chain_index);
3097 if (!chain) {
3098 NL_SET_ERR_MSG(extack, "Failed to create filter chain");
3099 err = -ENOMEM;
3100 goto errout_block_locked;
3101 }
3102 }
3103 } else {
3104 if (!chain || tcf_chain_held_by_acts_only(chain)) {
3105 NL_SET_ERR_MSG(extack, "Cannot find specified filter chain");
3106 err = -EINVAL;
3107 goto errout_block_locked;
3108 }
3109 tcf_chain_hold(chain);
3110 }
3111
3112 if (n->nlmsg_type == RTM_NEWCHAIN) {
3113 /* Modifying chain requires holding parent block lock. In case
3114 * the chain was successfully added, take a reference to the
3115 * chain. This ensures that an empty chain does not disappear at
3116 * the end of this function.
3117 */
3118 tcf_chain_hold(chain);
3119 chain->explicitly_created = true;
3120 }
3121 mutex_unlock(&block->lock);
3122
3123 switch (n->nlmsg_type) {
3124 case RTM_NEWCHAIN:
3125 err = tc_chain_tmplt_add(chain, net, tca, extack);
3126 if (err) {
3127 tcf_chain_put_explicitly_created(chain);
3128 goto errout;
3129 }
3130
3131 tc_chain_notify(chain, NULL, 0, NLM_F_CREATE | NLM_F_EXCL,
3132 RTM_NEWCHAIN, false, extack);
3133 break;
3134 case RTM_DELCHAIN:
3135 tfilter_notify_chain(net, skb, block, q, parent, n,
3136 chain, RTM_DELTFILTER, extack);
3137 /* Flush the chain first as the user requested chain removal. */
3138 tcf_chain_flush(chain, true);
3139 /* In case the chain was successfully deleted, put a reference
3140 * to the chain previously taken during addition.
3141 */
3142 tcf_chain_put_explicitly_created(chain);
3143 break;
3144 case RTM_GETCHAIN:
3145 err = tc_chain_notify(chain, skb, n->nlmsg_seq,
3146 n->nlmsg_flags, n->nlmsg_type, true, extack);
3147 if (err < 0)
3148 NL_SET_ERR_MSG(extack, "Failed to send chain notify message");
3149 break;
3150 default:
3151 err = -EOPNOTSUPP;
3152 NL_SET_ERR_MSG(extack, "Unsupported message type");
3153 goto errout;
3154 }
3155
3156errout:
3157 tcf_chain_put(chain);
3158errout_block:
3159 tcf_block_release(q, block, true);
3160 if (err == -EAGAIN)
3161 /* Replay the request. */
3162 goto replay;
3163 return err;
3164
3165errout_block_locked:
3166 mutex_unlock(&block->lock);
3167 goto errout_block;
3168}
3169
3170/* called with RTNL */
3171static int tc_dump_chain(struct sk_buff *skb, struct netlink_callback *cb)
3172{
3173 struct net *net = sock_net(skb->sk);
3174 struct nlattr *tca[TCA_MAX + 1];
3175 struct Qdisc *q = NULL;
3176 struct tcf_block *block;
3177 struct tcmsg *tcm = nlmsg_data(cb->nlh);
3178 struct tcf_chain *chain;
3179 long index_start;
3180 long index;
3181 int err;
3182
3183 if (nlmsg_len(cb->nlh) < sizeof(*tcm))
3184 return skb->len;
3185
3186 err = nlmsg_parse_deprecated(cb->nlh, sizeof(*tcm), tca, TCA_MAX,
3187 rtm_tca_policy, cb->extack);
3188 if (err)
3189 return err;
3190
3191 if (tcm->tcm_ifindex == TCM_IFINDEX_MAGIC_BLOCK) {
3192 block = tcf_block_refcnt_get(net, tcm->tcm_block_index);
3193 if (!block)
3194 goto out;
3195 } else {
3196 const struct Qdisc_class_ops *cops;
3197 struct net_device *dev;
3198 unsigned long cl = 0;
3199
3200 dev = __dev_get_by_index(net, tcm->tcm_ifindex);
3201 if (!dev)
3202 return skb->len;
3203
3204 if (!tcm->tcm_parent)
3205 q = rtnl_dereference(dev->qdisc);
3206 else
3207 q = qdisc_lookup(dev, TC_H_MAJ(tcm->tcm_parent));
3208
3209 if (!q)
3210 goto out;
3211 cops = q->ops->cl_ops;
3212 if (!cops)
3213 goto out;
3214 if (!cops->tcf_block)
3215 goto out;
3216 if (TC_H_MIN(tcm->tcm_parent)) {
3217 cl = cops->find(q, tcm->tcm_parent);
3218 if (cl == 0)
3219 goto out;
3220 }
3221 block = cops->tcf_block(q, cl, NULL);
3222 if (!block)
3223 goto out;
3224 if (tcf_block_shared(block))
3225 q = NULL;
3226 }
3227
3228 index_start = cb->args[0];
3229 index = 0;
3230
3231 mutex_lock(&block->lock);
3232 list_for_each_entry(chain, &block->chain_list, list) {
3233 if ((tca[TCA_CHAIN] &&
3234 nla_get_u32(tca[TCA_CHAIN]) != chain->index))
3235 continue;
3236 if (index < index_start) {
3237 index++;
3238 continue;
3239 }
3240 if (tcf_chain_held_by_acts_only(chain))
3241 continue;
3242 err = tc_chain_fill_node(chain->tmplt_ops, chain->tmplt_priv,
3243 chain->index, net, skb, block,
3244 NETLINK_CB(cb->skb).portid,
3245 cb->nlh->nlmsg_seq, NLM_F_MULTI,
3246 RTM_NEWCHAIN, NULL);
3247 if (err <= 0)
3248 break;
3249 index++;
3250 }
3251 mutex_unlock(&block->lock);
3252
3253 if (tcm->tcm_ifindex == TCM_IFINDEX_MAGIC_BLOCK)
3254 tcf_block_refcnt_put(block, true);
3255 cb->args[0] = index;
3256
3257out:
3258 /* If we made no progress, the error (EMSGSIZE) is real */
3259 if (skb->len == 0 && err)
3260 return err;
3261 return skb->len;
3262}
3263
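/* Initialize a tcf_exts. The actions array is only allocated under
 * CONFIG_NET_CLS_ACT. When use_action_miss is set, a miss cookie base is
 * also allocated so that hardware misses can be mapped back to the exts.
 */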
3264int tcf_exts_init_ex(struct tcf_exts *exts, struct net *net, int action,
3265 int police, struct tcf_proto *tp, u32 handle,
3266 bool use_action_miss)
3267{
3268 int err = 0;
3269
3270#ifdef CONFIG_NET_CLS_ACT
3271 exts->type = 0;
3272 exts->nr_actions = 0;
3273 exts->miss_cookie_node = NULL;
3274 /* Note: we do not yet own a reference on net.
3275 * This reference might be taken later from tcf_exts_get_net().
3276 */
3277 exts->net = net;
3278 exts->actions = kcalloc(TCA_ACT_MAX_PRIO, sizeof(struct tc_action *),
3279 GFP_KERNEL);
3280 if (!exts->actions)
3281 return -ENOMEM;
3282#endif
3283
3284 exts->action = action;
3285 exts->police = police;
3286
3287 if (!use_action_miss)
3288 return 0;
3289
3290 err = tcf_exts_miss_cookie_base_alloc(exts, tp, handle);
3291 if (err)
3292 goto err_miss_alloc;
3293
3294 return 0;
3295
3296err_miss_alloc:
3297 tcf_exts_destroy(exts);
3298#ifdef CONFIG_NET_CLS_ACT
3299 exts->actions = NULL;
3300#endif
3301 return err;
3302}
3303EXPORT_SYMBOL(tcf_exts_init_ex);
3304
3305void tcf_exts_destroy(struct tcf_exts *exts)
3306{
3307 tcf_exts_miss_cookie_base_destroy(exts);
3308
3309#ifdef CONFIG_NET_CLS_ACT
3310 if (exts->actions) {
3311 tcf_action_destroy(exts->actions, TCA_ACT_UNBIND);
3312 kfree(exts->actions);
3313 }
3314 exts->nr_actions = 0;
3315#endif
3316}
3317EXPORT_SYMBOL(tcf_exts_destroy);
3318
3319int tcf_exts_validate_ex(struct net *net, struct tcf_proto *tp, struct nlattr **tb,
3320 struct nlattr *rate_tlv, struct tcf_exts *exts,
3321 u32 flags, u32 fl_flags, struct netlink_ext_ack *extack)
3322{
3323#ifdef CONFIG_NET_CLS_ACT
3324 {
3325 int init_res[TCA_ACT_MAX_PRIO] = {};
3326 struct tc_action *act;
3327 size_t attr_size = 0;
3328
3329 if (exts->police && tb[exts->police]) {
3330 struct tc_action_ops *a_o;
3331
3332 flags |= TCA_ACT_FLAGS_POLICE | TCA_ACT_FLAGS_BIND;
3333 a_o = tc_action_load_ops(tb[exts->police], flags,
3334 extack);
3335 if (IS_ERR(a_o))
3336 return PTR_ERR(a_o);
3337 act = tcf_action_init_1(net, tp, tb[exts->police],
3338 rate_tlv, a_o, init_res, flags,
3339 extack);
3340 module_put(a_o->owner);
3341 if (IS_ERR(act))
3342 return PTR_ERR(act);
3343
3344 act->type = exts->type = TCA_OLD_COMPAT;
3345 exts->actions[0] = act;
3346 exts->nr_actions = 1;
3347 tcf_idr_insert_many(exts->actions, init_res);
3348 } else if (exts->action && tb[exts->action]) {
3349 int err;
3350
3351 flags |= TCA_ACT_FLAGS_BIND;
3352 err = tcf_action_init(net, tp, tb[exts->action],
3353 rate_tlv, exts->actions, init_res,
3354 &attr_size, flags, fl_flags,
3355 extack);
3356 if (err < 0)
3357 return err;
3358 exts->nr_actions = err;
3359 }
3360 }
3361#else
3362 if ((exts->action && tb[exts->action]) ||
3363 (exts->police && tb[exts->police])) {
3364 NL_SET_ERR_MSG(extack, "Classifier actions are not supported per compile options (CONFIG_NET_CLS_ACT)");
3365 return -EOPNOTSUPP;
3366 }
3367#endif
3368
3369 return 0;
3370}
3371EXPORT_SYMBOL(tcf_exts_validate_ex);
3372
3373int tcf_exts_validate(struct net *net, struct tcf_proto *tp, struct nlattr **tb,
3374 struct nlattr *rate_tlv, struct tcf_exts *exts,
3375 u32 flags, struct netlink_ext_ack *extack)
3376{
3377 return tcf_exts_validate_ex(net, tp, tb, rate_tlv, exts,
3378 flags, 0, extack);
3379}
3380EXPORT_SYMBOL(tcf_exts_validate);
3381
3382void tcf_exts_change(struct tcf_exts *dst, struct tcf_exts *src)
3383{
3384#ifdef CONFIG_NET_CLS_ACT
3385 struct tcf_exts old = *dst;
3386
3387 *dst = *src;
3388 tcf_exts_destroy(&old);
3389#endif
3390}
3391EXPORT_SYMBOL(tcf_exts_change);
3392
3393#ifdef CONFIG_NET_CLS_ACT
3394static struct tc_action *tcf_exts_first_act(struct tcf_exts *exts)
3395{
3396 if (exts->nr_actions == 0)
3397 return NULL;
3398 else
3399 return exts->actions[0];
3400}
3401#endif
3402
3403int tcf_exts_dump(struct sk_buff *skb, struct tcf_exts *exts)
3404{
3405#ifdef CONFIG_NET_CLS_ACT
3406 struct nlattr *nest;
3407
3408 if (exts->action && tcf_exts_has_actions(exts)) {
3409 /*
3410 * Again, for backward-compatible mode: we want to
3411 * work with both the old and new modes of entering
3412 * tc data even if iproute2 was newer - jhs
3413 */
3414 if (exts->type != TCA_OLD_COMPAT) {
3415 nest = nla_nest_start_noflag(skb, exts->action);
3416 if (nest == NULL)
3417 goto nla_put_failure;
3418
3419 if (tcf_action_dump(skb, exts->actions, 0, 0, false)
3420 < 0)
3421 goto nla_put_failure;
3422 nla_nest_end(skb, nest);
3423 } else if (exts->police) {
3424 struct tc_action *act = tcf_exts_first_act(exts);
3425 nest = nla_nest_start_noflag(skb, exts->police);
3426 if (nest == NULL || !act)
3427 goto nla_put_failure;
3428 if (tcf_action_dump_old(skb, act, 0, 0) < 0)
3429 goto nla_put_failure;
3430 nla_nest_end(skb, nest);
3431 }
3432 }
3433 return 0;
3434
3435nla_put_failure:
3436 nla_nest_cancel(skb, nest);
3437 return -1;
3438#else
3439 return 0;
3440#endif
3441}
3442EXPORT_SYMBOL(tcf_exts_dump);
3443
3444int tcf_exts_terse_dump(struct sk_buff *skb, struct tcf_exts *exts)
3445{
3446#ifdef CONFIG_NET_CLS_ACT
3447 struct nlattr *nest;
3448
3449 if (!exts->action || !tcf_exts_has_actions(exts))
3450 return 0;
3451
3452 nest = nla_nest_start_noflag(skb, exts->action);
3453 if (!nest)
3454 goto nla_put_failure;
3455
3456 if (tcf_action_dump(skb, exts->actions, 0, 0, true) < 0)
3457 goto nla_put_failure;
3458 nla_nest_end(skb, nest);
3459 return 0;
3460
3461nla_put_failure:
3462 nla_nest_cancel(skb, nest);
3463 return -1;
3464#else
3465 return 0;
3466#endif
3467}
3468EXPORT_SYMBOL(tcf_exts_terse_dump);
3469
3470int tcf_exts_dump_stats(struct sk_buff *skb, struct tcf_exts *exts)
3471{
3472#ifdef CONFIG_NET_CLS_ACT
3473 struct tc_action *a = tcf_exts_first_act(exts);
3474 if (a != NULL && tcf_action_copy_stats(skb, a, 1) < 0)
3475 return -1;
3476#endif
3477 return 0;
3478}
3479EXPORT_SYMBOL(tcf_exts_dump_stats);
3480
3481static void tcf_block_offload_inc(struct tcf_block *block, u32 *flags)
3482{
3483 if (*flags & TCA_CLS_FLAGS_IN_HW)
3484 return;
3485 *flags |= TCA_CLS_FLAGS_IN_HW;
3486 atomic_inc(&block->offloadcnt);
3487}
3488
3489static void tcf_block_offload_dec(struct tcf_block *block, u32 *flags)
3490{
3491 if (!(*flags & TCA_CLS_FLAGS_IN_HW))
3492 return;
3493 *flags &= ~TCA_CLS_FLAGS_IN_HW;
3494 atomic_dec(&block->offloadcnt);
3495}
3496
3497static void tc_cls_offload_cnt_update(struct tcf_block *block,
3498 struct tcf_proto *tp, u32 *cnt,
3499 u32 *flags, u32 diff, bool add)
3500{
3501 lockdep_assert_held(&block->cb_lock);
3502
3503 spin_lock(&tp->lock);
3504 if (add) {
3505 if (!*cnt)
3506 tcf_block_offload_inc(block, flags);
3507 *cnt += diff;
3508 } else {
3509 *cnt -= diff;
3510 if (!*cnt)
3511 tcf_block_offload_dec(block, flags);
3512 }
3513 spin_unlock(&tp->lock);
3514}
3515
3516static void
3517tc_cls_offload_cnt_reset(struct tcf_block *block, struct tcf_proto *tp,
3518 u32 *cnt, u32 *flags)
3519{
3520 lockdep_assert_held(&block->cb_lock);
3521
3522 spin_lock(&tp->lock);
3523 tcf_block_offload_dec(block, flags);
3524 *cnt = 0;
3525 spin_unlock(&tp->lock);
3526}
3527
3528static int
3529__tc_setup_cb_call(struct tcf_block *block, enum tc_setup_type type,
3530 void *type_data, bool err_stop)
3531{
3532 struct flow_block_cb *block_cb;
3533 int ok_count = 0;
3534 int err;
3535
3536 list_for_each_entry(block_cb, &block->flow_block.cb_list, list) {
3537 err = block_cb->cb(type, type_data, block_cb->cb_priv);
3538 if (err) {
3539 if (err_stop)
3540 return err;
3541 } else {
3542 ok_count++;
3543 }
3544 }
3545 return ok_count;
3546}
3547
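/* Invoke all block callbacks for an offload request. Returns the number of
 * callbacks that succeeded or, with err_stop set, the first error. Takes
 * rtnl if any device bound to the block requires it.
 */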
3548int tc_setup_cb_call(struct tcf_block *block, enum tc_setup_type type,
3549 void *type_data, bool err_stop, bool rtnl_held)
3550{
3551 bool take_rtnl = READ_ONCE(block->lockeddevcnt) && !rtnl_held;
3552 int ok_count;
3553
3554retry:
3555 if (take_rtnl)
3556 rtnl_lock();
3557 down_read(&block->cb_lock);
3558 /* Need to obtain rtnl lock if block is bound to devs that require it.
3559 * In block bind code cb_lock is obtained while holding rtnl, so we must
3560 * obtain the locks in the same order here.
3561 */
3562 if (!rtnl_held && !take_rtnl && block->lockeddevcnt) {
3563 up_read(&block->cb_lock);
3564 take_rtnl = true;
3565 goto retry;
3566 }
3567
3568 ok_count = __tc_setup_cb_call(block, type, type_data, err_stop);
3569
3570 up_read(&block->cb_lock);
3571 if (take_rtnl)
3572 rtnl_unlock();
3573 return ok_count;
3574}
3575EXPORT_SYMBOL(tc_setup_cb_call);
3576
3577 /* Non-destructive filter add. If a filter that wasn't already in hardware
3578 * is successfully offloaded, increment the block offloads counter. On
3579 * failure, the previously offloaded filter is considered intact and the
3580 * offloads counter is not decremented.
3581 */
3582
3583int tc_setup_cb_add(struct tcf_block *block, struct tcf_proto *tp,
3584 enum tc_setup_type type, void *type_data, bool err_stop,
3585 u32 *flags, unsigned int *in_hw_count, bool rtnl_held)
3586{
3587 bool take_rtnl = READ_ONCE(block->lockeddevcnt) && !rtnl_held;
3588 int ok_count;
3589
3590retry:
3591 if (take_rtnl)
3592 rtnl_lock();
3593 down_read(&block->cb_lock);
3594 /* Need to obtain rtnl lock if block is bound to devs that require it.
3595 * In block bind code cb_lock is obtained while holding rtnl, so we must
3596 * obtain the locks in the same order here.
3597 */
3598 if (!rtnl_held && !take_rtnl && block->lockeddevcnt) {
3599 up_read(&block->cb_lock);
3600 take_rtnl = true;
3601 goto retry;
3602 }
3603
3604 /* Make sure all netdevs sharing this block are offload-capable. */
3605 if (block->nooffloaddevcnt && err_stop) {
3606 ok_count = -EOPNOTSUPP;
3607 goto err_unlock;
3608 }
3609
3610 ok_count = __tc_setup_cb_call(block, type, type_data, err_stop);
3611 if (ok_count < 0)
3612 goto err_unlock;
3613
3614 if (tp->ops->hw_add)
3615 tp->ops->hw_add(tp, type_data);
3616 if (ok_count > 0)
3617 tc_cls_offload_cnt_update(block, tp, in_hw_count, flags,
3618 ok_count, true);
3619err_unlock:
3620 up_read(&block->cb_lock);
3621 if (take_rtnl)
3622 rtnl_unlock();
3623 return min(ok_count, 0);
3624}
3625EXPORT_SYMBOL(tc_setup_cb_add);
3626
3627 /* Destructive filter replace. If a filter that wasn't already in hardware
3628 * is successfully offloaded, increment the block offload counter. On
3629 * failure, the previously offloaded filter is considered destroyed and the
3630 * offload counter is decremented.
3631 */
3632
3633int tc_setup_cb_replace(struct tcf_block *block, struct tcf_proto *tp,
3634 enum tc_setup_type type, void *type_data, bool err_stop,
3635 u32 *old_flags, unsigned int *old_in_hw_count,
3636 u32 *new_flags, unsigned int *new_in_hw_count,
3637 bool rtnl_held)
3638{
3639 bool take_rtnl = READ_ONCE(block->lockeddevcnt) && !rtnl_held;
3640 int ok_count;
3641
3642retry:
3643 if (take_rtnl)
3644 rtnl_lock();
3645 down_read(&block->cb_lock);
3646 /* Need to obtain rtnl lock if block is bound to devs that require it.
3647 * In block bind code cb_lock is obtained while holding rtnl, so we must
3648 * obtain the locks in the same order here.
3649 */
3650 if (!rtnl_held && !take_rtnl && block->lockeddevcnt) {
3651 up_read(&block->cb_lock);
3652 take_rtnl = true;
3653 goto retry;
3654 }
3655
3656 /* Make sure all netdevs sharing this block are offload-capable. */
3657 if (block->nooffloaddevcnt && err_stop) {
3658 ok_count = -EOPNOTSUPP;
3659 goto err_unlock;
3660 }
3661
3662 tc_cls_offload_cnt_reset(block, tp, old_in_hw_count, old_flags);
3663 if (tp->ops->hw_del)
3664 tp->ops->hw_del(tp, type_data);
3665
3666 ok_count = __tc_setup_cb_call(block, type, type_data, err_stop);
3667 if (ok_count < 0)
3668 goto err_unlock;
3669
3670 if (tp->ops->hw_add)
3671 tp->ops->hw_add(tp, type_data);
3672 if (ok_count > 0)
3673 tc_cls_offload_cnt_update(block, tp, new_in_hw_count,
3674 new_flags, ok_count, true);
3675err_unlock:
3676 up_read(&block->cb_lock);
3677 if (take_rtnl)
3678 rtnl_unlock();
3679 return min(ok_count, 0);
3680}
3681EXPORT_SYMBOL(tc_setup_cb_replace);
3682
3683 /* Destroy the filter and decrement the block offload counter if the
3684 * filter was previously offloaded.
3685 */
3686
3687int tc_setup_cb_destroy(struct tcf_block *block, struct tcf_proto *tp,
3688 enum tc_setup_type type, void *type_data, bool err_stop,
3689 u32 *flags, unsigned int *in_hw_count, bool rtnl_held)
3690{
3691 bool take_rtnl = READ_ONCE(block->lockeddevcnt) && !rtnl_held;
3692 int ok_count;
3693
3694retry:
3695 if (take_rtnl)
3696 rtnl_lock();
3697 down_read(&block->cb_lock);
3698 /* Need to obtain rtnl lock if block is bound to devs that require it.
3699 * In block bind code cb_lock is obtained while holding rtnl, so we must
3700 * obtain the locks in the same order here.
3701 */
3702 if (!rtnl_held && !take_rtnl && block->lockeddevcnt) {
3703 up_read(&block->cb_lock);
3704 take_rtnl = true;
3705 goto retry;
3706 }
3707
3708 ok_count = __tc_setup_cb_call(block, type, type_data, err_stop);
3709
3710 tc_cls_offload_cnt_reset(block, tp, in_hw_count, flags);
3711 if (tp->ops->hw_del)
3712 tp->ops->hw_del(tp, type_data);
3713
3714 up_read(&block->cb_lock);
3715 if (take_rtnl)
3716 rtnl_unlock();
3717 return min(ok_count, 0);
3718}
3719EXPORT_SYMBOL(tc_setup_cb_destroy);
3720
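/* Replay a single filter to one callback when a block is (un)bound. The
 * offload counters are updated on success; an error is only fatal when
 * adding a filter that must be offloaded (skip_sw).
 */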
3721int tc_setup_cb_reoffload(struct tcf_block *block, struct tcf_proto *tp,
3722 bool add, flow_setup_cb_t *cb,
3723 enum tc_setup_type type, void *type_data,
3724 void *cb_priv, u32 *flags, unsigned int *in_hw_count)
3725{
3726 int err = cb(type, type_data, cb_priv);
3727
3728 if (err) {
3729 if (add && tc_skip_sw(*flags))
3730 return err;
3731 } else {
3732 tc_cls_offload_cnt_update(block, tp, in_hw_count, flags, 1,
3733 add);
3734 }
3735
3736 return 0;
3737}
3738EXPORT_SYMBOL(tc_setup_cb_reoffload);
3739
3740static int tcf_act_get_user_cookie(struct flow_action_entry *entry,
3741 const struct tc_action *act)
3742{
3743 struct tc_cookie *user_cookie;
3744 int err = 0;
3745
3746 rcu_read_lock();
3747 user_cookie = rcu_dereference(act->user_cookie);
3748 if (user_cookie) {
3749 entry->user_cookie = flow_action_cookie_create(user_cookie->data,
3750 user_cookie->len,
3751 GFP_ATOMIC);
3752 if (!entry->user_cookie)
3753 err = -ENOMEM;
3754 }
3755 rcu_read_unlock();
3756 return err;
3757}
3758
3759static void tcf_act_put_user_cookie(struct flow_action_entry *entry)
3760{
3761 flow_action_cookie_destroy(entry->user_cookie);
3762}
3763
3764void tc_cleanup_offload_action(struct flow_action *flow_action)
3765{
3766 struct flow_action_entry *entry;
3767 int i;
3768
3769 flow_action_for_each(i, entry, flow_action) {
3770 tcf_act_put_user_cookie(entry);
3771 if (entry->destructor)
3772 entry->destructor(entry->destructor_priv);
3773 }
3774}
3775EXPORT_SYMBOL(tc_cleanup_offload_action);
3776
3777static int tc_setup_offload_act(struct tc_action *act,
3778 struct flow_action_entry *entry,
3779 u32 *index_inc,
3780 struct netlink_ext_ack *extack)
3781{
3782#ifdef CONFIG_NET_CLS_ACT
3783 if (act->ops->offload_act_setup) {
3784 return act->ops->offload_act_setup(act, entry, index_inc, true,
3785 extack);
3786 } else {
3787 NL_SET_ERR_MSG(extack, "Action does not support offload");
3788 return -EOPNOTSUPP;
3789 }
3790#else
3791 return 0;
3792#endif
3793}
3794
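/* Translate an array of tc actions into flow_action entries for offload.
 * Each entry is annotated with its hw stats type, hw_index and a miss
 * cookie composed from miss_cookie_base and the action's index in exts.
 */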
3795int tc_setup_action(struct flow_action *flow_action,
3796 struct tc_action *actions[],
3797 u32 miss_cookie_base,
3798 struct netlink_ext_ack *extack)
3799{
3800 int i, j, k, index, err = 0;
3801 struct tc_action *act;
3802
3803 BUILD_BUG_ON(TCA_ACT_HW_STATS_ANY != FLOW_ACTION_HW_STATS_ANY);
3804 BUILD_BUG_ON(TCA_ACT_HW_STATS_IMMEDIATE != FLOW_ACTION_HW_STATS_IMMEDIATE);
3805 BUILD_BUG_ON(TCA_ACT_HW_STATS_DELAYED != FLOW_ACTION_HW_STATS_DELAYED);
3806
3807 if (!actions)
3808 return 0;
3809
3810 j = 0;
3811 tcf_act_for_each_action(i, act, actions) {
3812 struct flow_action_entry *entry;
3813
3814 entry = &flow_action->entries[j];
3815 spin_lock_bh(&act->tcfa_lock);
3816 err = tcf_act_get_user_cookie(entry, act);
3817 if (err)
3818 goto err_out_locked;
3819
3820 index = 0;
3821 err = tc_setup_offload_act(act, entry, &index, extack);
3822 if (err)
3823 goto err_out_locked;
3824
3825 for (k = 0; k < index; k++) {
3826 entry[k].hw_stats = tc_act_hw_stats(act->hw_stats);
3827 entry[k].hw_index = act->tcfa_index;
3828 entry[k].cookie = (unsigned long)act;
3829 entry[k].miss_cookie =
3830 tcf_exts_miss_cookie_get(miss_cookie_base, i);
3831 }
3832
3833 j += index;
3834
3835 spin_unlock_bh(&act->tcfa_lock);
3836 }
3837
3838err_out:
3839 if (err)
3840 tc_cleanup_offload_action(flow_action);
3841
3842 return err;
3843err_out_locked:
3844 spin_unlock_bh(&act->tcfa_lock);
3845 goto err_out;
3846}
3847
3848int tc_setup_offload_action(struct flow_action *flow_action,
3849 const struct tcf_exts *exts,
3850 struct netlink_ext_ack *extack)
3851{
3852#ifdef CONFIG_NET_CLS_ACT
3853 u32 miss_cookie_base;
3854
3855 if (!exts)
3856 return 0;
3857
3858 miss_cookie_base = exts->miss_cookie_node ?
3859 exts->miss_cookie_node->miss_cookie_base : 0;
3860 return tc_setup_action(flow_action, exts->actions, miss_cookie_base,
3861 extack);
3862#else
3863 return 0;
3864#endif
3865}
3866EXPORT_SYMBOL(tc_setup_offload_action);
3867
3868unsigned int tcf_exts_num_actions(struct tcf_exts *exts)
3869{
3870 unsigned int num_acts = 0;
3871 struct tc_action *act;
3872 int i;
3873
3874 tcf_exts_for_each_action(i, act, exts) {
3875 if (is_tcf_pedit(act))
3876 num_acts += tcf_pedit_nkeys(act);
3877 else
3878 num_acts++;
3879 }
3880 return num_acts;
3881}
3882EXPORT_SYMBOL(tcf_exts_num_actions);
3883
3884#ifdef CONFIG_NET_CLS_ACT
3885static int tcf_qevent_parse_block_index(struct nlattr *block_index_attr,
3886 u32 *p_block_index,
3887 struct netlink_ext_ack *extack)
3888{
3889 *p_block_index = nla_get_u32(block_index_attr);
3890 if (!*p_block_index) {
3891 NL_SET_ERR_MSG(extack, "Block number may not be zero");
3892 return -EINVAL;
3893 }
3894
3895 return 0;
3896}
3897
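/* Bind a qevent to the block identified by block_index_attr. Without the
 * attribute the qevent stays inactive and the remaining qevent helpers are
 * no-ops.
 */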
3898int tcf_qevent_init(struct tcf_qevent *qe, struct Qdisc *sch,
3899 enum flow_block_binder_type binder_type,
3900 struct nlattr *block_index_attr,
3901 struct netlink_ext_ack *extack)
3902{
3903 u32 block_index;
3904 int err;
3905
3906 if (!block_index_attr)
3907 return 0;
3908
3909 err = tcf_qevent_parse_block_index(block_index_attr, &block_index, extack);
3910 if (err)
3911 return err;
3912
3913 qe->info.binder_type = binder_type;
3914 qe->info.chain_head_change = tcf_chain_head_change_dflt;
3915 qe->info.chain_head_change_priv = &qe->filter_chain;
3916 qe->info.block_index = block_index;
3917
3918 return tcf_block_get_ext(&qe->block, sch, &qe->info, extack);
3919}
3920EXPORT_SYMBOL(tcf_qevent_init);
3921
3922void tcf_qevent_destroy(struct tcf_qevent *qe, struct Qdisc *sch)
3923{
3924 if (qe->info.block_index)
3925 tcf_block_put_ext(qe->block, sch, &qe->info);
3926}
3927EXPORT_SYMBOL(tcf_qevent_destroy);
3928
3929int tcf_qevent_validate_change(struct tcf_qevent *qe, struct nlattr *block_index_attr,
3930 struct netlink_ext_ack *extack)
3931{
3932 u32 block_index;
3933 int err;
3934
3935 if (!block_index_attr)
3936 return 0;
3937
3938 err = tcf_qevent_parse_block_index(block_index_attr, &block_index, extack);
3939 if (err)
3940 return err;
3941
3942 /* Bounce newly-configured block or change in block. */
3943 if (block_index != qe->info.block_index) {
3944 NL_SET_ERR_MSG(extack, "Change of blocks is not supported");
3945 return -EINVAL;
3946 }
3947
3948 return 0;
3949}
3950EXPORT_SYMBOL(tcf_qevent_validate_change);
3951
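/* Run the qevent's filter chain on skb. The packet may be dropped, stolen
 * or redirected, in which case NULL is returned and *ret is set to the
 * appropriate __NET_XMIT_* code; otherwise the skb is returned unchanged.
 */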
3952struct sk_buff *tcf_qevent_handle(struct tcf_qevent *qe, struct Qdisc *sch, struct sk_buff *skb,
3953 struct sk_buff **to_free, int *ret)
3954{
3955 struct tcf_result cl_res;
3956 struct tcf_proto *fl;
3957
3958 if (!qe->info.block_index)
3959 return skb;
3960
3961 fl = rcu_dereference_bh(qe->filter_chain);
3962
3963 switch (tcf_classify(skb, NULL, fl, &cl_res, false)) {
3964 case TC_ACT_SHOT:
3965 qdisc_qstats_drop(sch);
3966 __qdisc_drop(skb, to_free);
3967 *ret = __NET_XMIT_BYPASS;
3968 return NULL;
3969 case TC_ACT_STOLEN:
3970 case TC_ACT_QUEUED:
3971 case TC_ACT_TRAP:
3972 __qdisc_drop(skb, to_free);
3973 *ret = __NET_XMIT_STOLEN;
3974 return NULL;
3975 case TC_ACT_REDIRECT:
3976 skb_do_redirect(skb);
3977 *ret = __NET_XMIT_STOLEN;
3978 return NULL;
3979 }
3980
3981 return skb;
3982}
3983EXPORT_SYMBOL(tcf_qevent_handle);
3984
3985int tcf_qevent_dump(struct sk_buff *skb, int attr_name, struct tcf_qevent *qe)
3986{
3987 if (!qe->info.block_index)
3988 return 0;
3989 return nla_put_u32(skb, attr_name, qe->info.block_index);
3990}
3991EXPORT_SYMBOL(tcf_qevent_dump);
3992#endif
3993
3994static __net_init int tcf_net_init(struct net *net)
3995{
3996 struct tcf_net *tn = net_generic(net, tcf_net_id);
3997
3998 spin_lock_init(&tn->idr_lock);
3999 idr_init(&tn->idr);
4000 return 0;
4001}
4002
4003static void __net_exit tcf_net_exit(struct net *net)
4004{
4005 struct tcf_net *tn = net_generic(net, tcf_net_id);
4006
4007 idr_destroy(&tn->idr);
4008}
4009
4010static struct pernet_operations tcf_net_ops = {
4011 .init = tcf_net_init,
4012 .exit = tcf_net_exit,
4013 .id = &tcf_net_id,
4014 .size = sizeof(struct tcf_net),
4015};
4016
4017static int __init tc_filter_init(void)
4018{
4019 int err;
4020
4021 tc_filter_wq = alloc_ordered_workqueue("tc_filter_workqueue", 0);
4022 if (!tc_filter_wq)
4023 return -ENOMEM;
4024
4025 err = register_pernet_subsys(&tcf_net_ops);
4026 if (err)
4027 goto err_register_pernet_subsys;
4028
4029 xa_init_flags(&tcf_exts_miss_cookies_xa, XA_FLAGS_ALLOC1);
4030
4031 rtnl_register(PF_UNSPEC, RTM_NEWTFILTER, tc_new_tfilter, NULL,
4032 RTNL_FLAG_DOIT_UNLOCKED);
4033 rtnl_register(PF_UNSPEC, RTM_DELTFILTER, tc_del_tfilter, NULL,
4034 RTNL_FLAG_DOIT_UNLOCKED);
4035 rtnl_register(PF_UNSPEC, RTM_GETTFILTER, tc_get_tfilter,
4036 tc_dump_tfilter, RTNL_FLAG_DOIT_UNLOCKED);
4037 rtnl_register(PF_UNSPEC, RTM_NEWCHAIN, tc_ctl_chain, NULL, 0);
4038 rtnl_register(PF_UNSPEC, RTM_DELCHAIN, tc_ctl_chain, NULL, 0);
4039 rtnl_register(PF_UNSPEC, RTM_GETCHAIN, tc_ctl_chain,
4040 tc_dump_chain, 0);
4041
4042 return 0;
4043
4044err_register_pernet_subsys:
4045 destroy_workqueue(tc_filter_wq);
4046 return err;
4047}
4048
4049subsys_initcall(tc_filter_init);
137}
138
139static void tcf_exts_miss_cookie_base_destroy(struct tcf_exts *exts)
140{
141}
142#endif /* IS_ENABLED(CONFIG_NET_TC_SKB_EXT) */
143
144static u64 tcf_exts_miss_cookie_get(u32 miss_cookie_base, int act_index)
145{
146 union tcf_exts_miss_cookie mc = { .act_index = act_index, };
147
148 if (!miss_cookie_base)
149 return 0;
150
151 mc.miss_cookie_base = miss_cookie_base;
152 return mc.miss_cookie;
153}
154
155#ifdef CONFIG_NET_CLS_ACT
156DEFINE_STATIC_KEY_FALSE(tc_skb_ext_tc);
157EXPORT_SYMBOL(tc_skb_ext_tc);
158
159void tc_skb_ext_tc_enable(void)
160{
161 static_branch_inc(&tc_skb_ext_tc);
162}
163EXPORT_SYMBOL(tc_skb_ext_tc_enable);
164
165void tc_skb_ext_tc_disable(void)
166{
167 static_branch_dec(&tc_skb_ext_tc);
168}
169EXPORT_SYMBOL(tc_skb_ext_tc_disable);
170#endif
171
172static u32 destroy_obj_hashfn(const struct tcf_proto *tp)
173{
174 return jhash_3words(tp->chain->index, tp->prio,
175 (__force __u32)tp->protocol, 0);
176}
177
178static void tcf_proto_signal_destroying(struct tcf_chain *chain,
179 struct tcf_proto *tp)
180{
181 struct tcf_block *block = chain->block;
182
183 mutex_lock(&block->proto_destroy_lock);
184 hash_add_rcu(block->proto_destroy_ht, &tp->destroy_ht_node,
185 destroy_obj_hashfn(tp));
186 mutex_unlock(&block->proto_destroy_lock);
187}
188
189static bool tcf_proto_cmp(const struct tcf_proto *tp1,
190 const struct tcf_proto *tp2)
191{
192 return tp1->chain->index == tp2->chain->index &&
193 tp1->prio == tp2->prio &&
194 tp1->protocol == tp2->protocol;
195}
196
197static bool tcf_proto_exists_destroying(struct tcf_chain *chain,
198 struct tcf_proto *tp)
199{
200 u32 hash = destroy_obj_hashfn(tp);
201 struct tcf_proto *iter;
202 bool found = false;
203
204 rcu_read_lock();
205 hash_for_each_possible_rcu(chain->block->proto_destroy_ht, iter,
206 destroy_ht_node, hash) {
207 if (tcf_proto_cmp(tp, iter)) {
208 found = true;
209 break;
210 }
211 }
212 rcu_read_unlock();
213
214 return found;
215}
216
217static void
218tcf_proto_signal_destroyed(struct tcf_chain *chain, struct tcf_proto *tp)
219{
220 struct tcf_block *block = chain->block;
221
222 mutex_lock(&block->proto_destroy_lock);
223 if (hash_hashed(&tp->destroy_ht_node))
224 hash_del_rcu(&tp->destroy_ht_node);
225 mutex_unlock(&block->proto_destroy_lock);
226}
227
228/* Find classifier type by string name */
229
230static const struct tcf_proto_ops *__tcf_proto_lookup_ops(const char *kind)
231{
232 const struct tcf_proto_ops *t, *res = NULL;
233
234 if (kind) {
235 read_lock(&cls_mod_lock);
236 list_for_each_entry(t, &tcf_proto_base, head) {
237 if (strcmp(kind, t->kind) == 0) {
238 if (try_module_get(t->owner))
239 res = t;
240 break;
241 }
242 }
243 read_unlock(&cls_mod_lock);
244 }
245 return res;
246}
247
248static const struct tcf_proto_ops *
249tcf_proto_lookup_ops(const char *kind, bool rtnl_held,
250 struct netlink_ext_ack *extack)
251{
252 const struct tcf_proto_ops *ops;
253
254 ops = __tcf_proto_lookup_ops(kind);
255 if (ops)
256 return ops;
257#ifdef CONFIG_MODULES
258 if (rtnl_held)
259 rtnl_unlock();
260 request_module(NET_CLS_ALIAS_PREFIX "%s", kind);
261 if (rtnl_held)
262 rtnl_lock();
263 ops = __tcf_proto_lookup_ops(kind);
264 /* We dropped the RTNL semaphore in order to perform
265 * the module load. So, even if we succeeded in loading
266 * the module we have to replay the request. We indicate
267 * this using -EAGAIN.
268 */
269 if (ops) {
270 module_put(ops->owner);
271 return ERR_PTR(-EAGAIN);
272 }
273#endif
274 NL_SET_ERR_MSG(extack, "TC classifier not found");
275 return ERR_PTR(-ENOENT);
276}
277
278/* Register(unregister) new classifier type */
279
280int register_tcf_proto_ops(struct tcf_proto_ops *ops)
281{
282 struct tcf_proto_ops *t;
283 int rc = -EEXIST;
284
285 write_lock(&cls_mod_lock);
286 list_for_each_entry(t, &tcf_proto_base, head)
287 if (!strcmp(ops->kind, t->kind))
288 goto out;
289
290 list_add_tail(&ops->head, &tcf_proto_base);
291 rc = 0;
292out:
293 write_unlock(&cls_mod_lock);
294 return rc;
295}
296EXPORT_SYMBOL(register_tcf_proto_ops);
297
298static struct workqueue_struct *tc_filter_wq;
299
300void unregister_tcf_proto_ops(struct tcf_proto_ops *ops)
301{
302 struct tcf_proto_ops *t;
303 int rc = -ENOENT;
304
305 /* Wait for outstanding call_rcu()s, if any, from a
306 * tcf_proto_ops's destroy() handler.
307 */
308 rcu_barrier();
309 flush_workqueue(tc_filter_wq);
310
311 write_lock(&cls_mod_lock);
312 list_for_each_entry(t, &tcf_proto_base, head) {
313 if (t == ops) {
314 list_del(&t->head);
315 rc = 0;
316 break;
317 }
318 }
319 write_unlock(&cls_mod_lock);
320
321 WARN(rc, "unregister tc filter kind(%s) failed %d\n", ops->kind, rc);
322}
323EXPORT_SYMBOL(unregister_tcf_proto_ops);
324
325bool tcf_queue_work(struct rcu_work *rwork, work_func_t func)
326{
327 INIT_RCU_WORK(rwork, func);
328 return queue_rcu_work(tc_filter_wq, rwork);
329}
330EXPORT_SYMBOL(tcf_queue_work);
331
332/* Select new prio value from the range, managed by kernel. */
333
334static inline u32 tcf_auto_prio(struct tcf_proto *tp)
335{
336 u32 first = TC_H_MAKE(0xC0000000U, 0U);
337
338 if (tp)
339 first = tp->prio - 1;
340
341 return TC_H_MAJ(first);
342}
343
344static bool tcf_proto_check_kind(struct nlattr *kind, char *name)
345{
346 if (kind)
347 return nla_strscpy(name, kind, IFNAMSIZ) < 0;
348 memset(name, 0, IFNAMSIZ);
349 return false;
350}
351
352static bool tcf_proto_is_unlocked(const char *kind)
353{
354 const struct tcf_proto_ops *ops;
355 bool ret;
356
357 if (strlen(kind) == 0)
358 return false;
359
360 ops = tcf_proto_lookup_ops(kind, false, NULL);
361 /* On error return false to take rtnl lock. Proto lookup/create
362 * functions will perform lookup again and properly handle errors.
363 */
364 if (IS_ERR(ops))
365 return false;
366
367 ret = !!(ops->flags & TCF_PROTO_OPS_DOIT_UNLOCKED);
368 module_put(ops->owner);
369 return ret;
370}
371
372static struct tcf_proto *tcf_proto_create(const char *kind, u32 protocol,
373 u32 prio, struct tcf_chain *chain,
374 bool rtnl_held,
375 struct netlink_ext_ack *extack)
376{
377 struct tcf_proto *tp;
378 int err;
379
380 tp = kzalloc(sizeof(*tp), GFP_KERNEL);
381 if (!tp)
382 return ERR_PTR(-ENOBUFS);
383
384 tp->ops = tcf_proto_lookup_ops(kind, rtnl_held, extack);
385 if (IS_ERR(tp->ops)) {
386 err = PTR_ERR(tp->ops);
387 goto errout;
388 }
389 tp->classify = tp->ops->classify;
390 tp->protocol = protocol;
391 tp->prio = prio;
392 tp->chain = chain;
393 tp->usesw = !tp->ops->reoffload;
394 spin_lock_init(&tp->lock);
395 refcount_set(&tp->refcnt, 1);
396
397 err = tp->ops->init(tp);
398 if (err) {
399 module_put(tp->ops->owner);
400 goto errout;
401 }
402 return tp;
403
404errout:
405 kfree(tp);
406 return ERR_PTR(err);
407}
408
409static void tcf_proto_get(struct tcf_proto *tp)
410{
411 refcount_inc(&tp->refcnt);
412}
413
414static void tcf_proto_count_usesw(struct tcf_proto *tp, bool add)
415{
416#ifdef CONFIG_NET_CLS_ACT
417 struct tcf_block *block = tp->chain->block;
418 bool counted = false;
419
420 if (!add) {
421 if (tp->usesw && tp->counted) {
422 if (!atomic_dec_return(&block->useswcnt))
423 static_branch_dec(&tcf_sw_enabled_key);
424 tp->counted = false;
425 }
426 return;
427 }
428
429 spin_lock(&tp->lock);
430 if (tp->usesw && !tp->counted) {
431 counted = true;
432 tp->counted = true;
433 }
434 spin_unlock(&tp->lock);
435
436 if (counted && atomic_inc_return(&block->useswcnt) == 1)
437 static_branch_inc(&tcf_sw_enabled_key);
438#endif
439}
440
441static void tcf_chain_put(struct tcf_chain *chain);
442
443static void tcf_proto_destroy(struct tcf_proto *tp, bool rtnl_held,
444 bool sig_destroy, struct netlink_ext_ack *extack)
445{
446 tp->ops->destroy(tp, rtnl_held, extack);
447 tcf_proto_count_usesw(tp, false);
448 if (sig_destroy)
449 tcf_proto_signal_destroyed(tp->chain, tp);
450 tcf_chain_put(tp->chain);
451 module_put(tp->ops->owner);
452 kfree_rcu(tp, rcu);
453}
454
455static void tcf_proto_put(struct tcf_proto *tp, bool rtnl_held,
456 struct netlink_ext_ack *extack)
457{
458 if (refcount_dec_and_test(&tp->refcnt))
459 tcf_proto_destroy(tp, rtnl_held, true, extack);
460}
461
462static bool tcf_proto_check_delete(struct tcf_proto *tp)
463{
464 if (tp->ops->delete_empty)
465 return tp->ops->delete_empty(tp);
466
467 tp->deleting = true;
468 return tp->deleting;
469}
470
471static void tcf_proto_mark_delete(struct tcf_proto *tp)
472{
473 spin_lock(&tp->lock);
474 tp->deleting = true;
475 spin_unlock(&tp->lock);
476}
477
478static bool tcf_proto_is_deleting(struct tcf_proto *tp)
479{
480 bool deleting;
481
482 spin_lock(&tp->lock);
483 deleting = tp->deleting;
484 spin_unlock(&tp->lock);
485
486 return deleting;
487}
488
489#define ASSERT_BLOCK_LOCKED(block) \
490 lockdep_assert_held(&(block)->lock)
491
492struct tcf_filter_chain_list_item {
493 struct list_head list;
494 tcf_chain_head_change_t *chain_head_change;
495 void *chain_head_change_priv;
496};
497
498static struct tcf_chain *tcf_chain_create(struct tcf_block *block,
499 u32 chain_index)
500{
501 struct tcf_chain *chain;
502
503 ASSERT_BLOCK_LOCKED(block);
504
505 chain = kzalloc(sizeof(*chain), GFP_KERNEL);
506 if (!chain)
507 return NULL;
508 list_add_tail_rcu(&chain->list, &block->chain_list);
509 mutex_init(&chain->filter_chain_lock);
510 chain->block = block;
511 chain->index = chain_index;
512 chain->refcnt = 1;
513 if (!chain->index)
514 block->chain0.chain = chain;
515 return chain;
516}
517
518static void tcf_chain_head_change_item(struct tcf_filter_chain_list_item *item,
519 struct tcf_proto *tp_head)
520{
521 if (item->chain_head_change)
522 item->chain_head_change(tp_head, item->chain_head_change_priv);
523}
524
525static void tcf_chain0_head_change(struct tcf_chain *chain,
526 struct tcf_proto *tp_head)
527{
528 struct tcf_filter_chain_list_item *item;
529 struct tcf_block *block = chain->block;
530
531 if (chain->index)
532 return;
533
534 mutex_lock(&block->lock);
535 list_for_each_entry(item, &block->chain0.filter_chain_list, list)
536 tcf_chain_head_change_item(item, tp_head);
537 mutex_unlock(&block->lock);
538}
539
540/* Returns true if block can be safely freed. */
541
542static bool tcf_chain_detach(struct tcf_chain *chain)
543{
544 struct tcf_block *block = chain->block;
545
546 ASSERT_BLOCK_LOCKED(block);
547
548 list_del_rcu(&chain->list);
549 if (!chain->index)
550 block->chain0.chain = NULL;
551
552 if (list_empty(&block->chain_list) &&
553 refcount_read(&block->refcnt) == 0)
554 return true;
555
556 return false;
557}
558
559static void tcf_block_destroy(struct tcf_block *block)
560{
561 mutex_destroy(&block->lock);
562 mutex_destroy(&block->proto_destroy_lock);
563 xa_destroy(&block->ports);
564 kfree_rcu(block, rcu);
565}
566
567static void tcf_chain_destroy(struct tcf_chain *chain, bool free_block)
568{
569 struct tcf_block *block = chain->block;
570
571 mutex_destroy(&chain->filter_chain_lock);
572 kfree_rcu(chain, rcu);
573 if (free_block)
574 tcf_block_destroy(block);
575}
576
577static void tcf_chain_hold(struct tcf_chain *chain)
578{
579 ASSERT_BLOCK_LOCKED(chain->block);
580
581 ++chain->refcnt;
582}
583
584static bool tcf_chain_held_by_acts_only(struct tcf_chain *chain)
585{
586 ASSERT_BLOCK_LOCKED(chain->block);
587
588 /* In case all the references are action references, this
589 * chain should not be shown to the user.
590 */
591 return chain->refcnt == chain->action_refcnt;
592}
593
594static struct tcf_chain *tcf_chain_lookup(struct tcf_block *block,
595 u32 chain_index)
596{
597 struct tcf_chain *chain;
598
599 ASSERT_BLOCK_LOCKED(block);
600
601 list_for_each_entry(chain, &block->chain_list, list) {
602 if (chain->index == chain_index)
603 return chain;
604 }
605 return NULL;
606}
607
608#if IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
609static struct tcf_chain *tcf_chain_lookup_rcu(const struct tcf_block *block,
610 u32 chain_index)
611{
612 struct tcf_chain *chain;
613
614 list_for_each_entry_rcu(chain, &block->chain_list, list) {
615 if (chain->index == chain_index)
616 return chain;
617 }
618 return NULL;
619}
620#endif
621
622static int tc_chain_notify(struct tcf_chain *chain, struct sk_buff *oskb,
623 u32 seq, u16 flags, int event, bool unicast,
624 struct netlink_ext_ack *extack);
625
626static struct tcf_chain *__tcf_chain_get(struct tcf_block *block,
627 u32 chain_index, bool create,
628 bool by_act)
629{
630 struct tcf_chain *chain = NULL;
631 bool is_first_reference;
632
633 mutex_lock(&block->lock);
634 chain = tcf_chain_lookup(block, chain_index);
635 if (chain) {
636 tcf_chain_hold(chain);
637 } else {
638 if (!create)
639 goto errout;
640 chain = tcf_chain_create(block, chain_index);
641 if (!chain)
642 goto errout;
643 }
644
645 if (by_act)
646 ++chain->action_refcnt;
647 is_first_reference = chain->refcnt - chain->action_refcnt == 1;
648 mutex_unlock(&block->lock);
649
650 /* Send notification only in case we got the first
651 * non-action reference. Until then, the chain acts only as
652 * a placeholder for actions pointing to it and user ought
653 * not know about them.
654 */
655 if (is_first_reference && !by_act)
656 tc_chain_notify(chain, NULL, 0, NLM_F_CREATE | NLM_F_EXCL,
657 RTM_NEWCHAIN, false, NULL);
658
659 return chain;
660
661errout:
662 mutex_unlock(&block->lock);
663 return chain;
664}
665
666static struct tcf_chain *tcf_chain_get(struct tcf_block *block, u32 chain_index,
667 bool create)
668{
669 return __tcf_chain_get(block, chain_index, create, false);
670}
671
672struct tcf_chain *tcf_chain_get_by_act(struct tcf_block *block, u32 chain_index)
673{
674 return __tcf_chain_get(block, chain_index, true, true);
675}
676EXPORT_SYMBOL(tcf_chain_get_by_act);
677
678static void tc_chain_tmplt_del(const struct tcf_proto_ops *tmplt_ops,
679 void *tmplt_priv);
680static int tc_chain_notify_delete(const struct tcf_proto_ops *tmplt_ops,
681 void *tmplt_priv, u32 chain_index,
682 struct tcf_block *block, struct sk_buff *oskb,
683 u32 seq, u16 flags);
684
685static void __tcf_chain_put(struct tcf_chain *chain, bool by_act,
686 bool explicitly_created)
687{
688 struct tcf_block *block = chain->block;
689 const struct tcf_proto_ops *tmplt_ops;
690 unsigned int refcnt, non_act_refcnt;
691 bool free_block = false;
692 void *tmplt_priv;
693
694 mutex_lock(&block->lock);
695 if (explicitly_created) {
696 if (!chain->explicitly_created) {
697 mutex_unlock(&block->lock);
698 return;
699 }
700 chain->explicitly_created = false;
701 }
702
703 if (by_act)
704 chain->action_refcnt--;
705
706 /* tc_chain_notify_delete can't be called while holding block lock.
707 * However, when block is unlocked chain can be changed concurrently, so
708 * save these to temporary variables.
709 */
710 refcnt = --chain->refcnt;
711 non_act_refcnt = refcnt - chain->action_refcnt;
712 tmplt_ops = chain->tmplt_ops;
713 tmplt_priv = chain->tmplt_priv;
714
715 if (non_act_refcnt == chain->explicitly_created && !by_act) {
716 if (non_act_refcnt == 0)
717 tc_chain_notify_delete(tmplt_ops, tmplt_priv,
718 chain->index, block, NULL, 0, 0);
719 /* Last reference to chain, no need to lock. */
720 chain->flushing = false;
721 }
722
723 if (refcnt == 0)
724 free_block = tcf_chain_detach(chain);
725 mutex_unlock(&block->lock);
726
727 if (refcnt == 0) {
728 tc_chain_tmplt_del(tmplt_ops, tmplt_priv);
729 tcf_chain_destroy(chain, free_block);
730 }
731}
732
733static void tcf_chain_put(struct tcf_chain *chain)
734{
735 __tcf_chain_put(chain, false, false);
736}
737
738void tcf_chain_put_by_act(struct tcf_chain *chain)
739{
740 __tcf_chain_put(chain, true, false);
741}
742EXPORT_SYMBOL(tcf_chain_put_by_act);
743
744static void tcf_chain_put_explicitly_created(struct tcf_chain *chain)
745{
746 __tcf_chain_put(chain, false, true);
747}
748
749static void tcf_chain_flush(struct tcf_chain *chain, bool rtnl_held)
750{
751 struct tcf_proto *tp, *tp_next;
752
753 mutex_lock(&chain->filter_chain_lock);
754 tp = tcf_chain_dereference(chain->filter_chain, chain);
755 while (tp) {
756 tp_next = rcu_dereference_protected(tp->next, 1);
757 tcf_proto_signal_destroying(chain, tp);
758 tp = tp_next;
759 }
760 tp = tcf_chain_dereference(chain->filter_chain, chain);
761 RCU_INIT_POINTER(chain->filter_chain, NULL);
762 tcf_chain0_head_change(chain, NULL);
763 chain->flushing = true;
764 mutex_unlock(&chain->filter_chain_lock);
765
766 while (tp) {
767 tp_next = rcu_dereference_protected(tp->next, 1);
768 tcf_proto_put(tp, rtnl_held, NULL);
769 tp = tp_next;
770 }
771}
772
773static int tcf_block_setup(struct tcf_block *block,
774 struct flow_block_offload *bo);
775
776static void tcf_block_offload_init(struct flow_block_offload *bo,
777 struct net_device *dev, struct Qdisc *sch,
778 enum flow_block_command command,
779 enum flow_block_binder_type binder_type,
780 struct flow_block *flow_block,
781 bool shared, struct netlink_ext_ack *extack)
782{
783 bo->net = dev_net(dev);
784 bo->command = command;
785 bo->binder_type = binder_type;
786 bo->block = flow_block;
787 bo->block_shared = shared;
788 bo->extack = extack;
789 bo->sch = sch;
790 bo->cb_list_head = &flow_block->cb_list;
791 INIT_LIST_HEAD(&bo->cb_list);
792}
793
794static void tcf_block_unbind(struct tcf_block *block,
795 struct flow_block_offload *bo);
796
797static void tc_block_indr_cleanup(struct flow_block_cb *block_cb)
798{
799 struct tcf_block *block = block_cb->indr.data;
800 struct net_device *dev = block_cb->indr.dev;
801 struct Qdisc *sch = block_cb->indr.sch;
802 struct netlink_ext_ack extack = {};
803 struct flow_block_offload bo = {};
804
805 tcf_block_offload_init(&bo, dev, sch, FLOW_BLOCK_UNBIND,
806 block_cb->indr.binder_type,
807 &block->flow_block, tcf_block_shared(block),
808 &extack);
809 rtnl_lock();
810 down_write(&block->cb_lock);
811 list_del(&block_cb->driver_list);
812 list_move(&block_cb->list, &bo.cb_list);
813 tcf_block_unbind(block, &bo);
814 up_write(&block->cb_lock);
815 rtnl_unlock();
816}
817
818static bool tcf_block_offload_in_use(struct tcf_block *block)
819{
820 return atomic_read(&block->offloadcnt);
821}
822
823static int tcf_block_offload_cmd(struct tcf_block *block,
824 struct net_device *dev, struct Qdisc *sch,
825 struct tcf_block_ext_info *ei,
826 enum flow_block_command command,
827 struct netlink_ext_ack *extack)
828{
829 struct flow_block_offload bo = {};
830
831 tcf_block_offload_init(&bo, dev, sch, command, ei->binder_type,
832 &block->flow_block, tcf_block_shared(block),
833 extack);
834
835 if (dev->netdev_ops->ndo_setup_tc) {
836 int err;
837
838 err = dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_BLOCK, &bo);
839 if (err < 0) {
840 if (err != -EOPNOTSUPP)
841 NL_SET_ERR_MSG(extack, "Driver ndo_setup_tc failed");
842 return err;
843 }
844
845 return tcf_block_setup(block, &bo);
846 }
847
848 flow_indr_dev_setup_offload(dev, sch, TC_SETUP_BLOCK, block, &bo,
849 tc_block_indr_cleanup);
850 tcf_block_setup(block, &bo);
851
852 return -EOPNOTSUPP;
853}
854
855static int tcf_block_offload_bind(struct tcf_block *block, struct Qdisc *q,
856 struct tcf_block_ext_info *ei,
857 struct netlink_ext_ack *extack)
858{
859 struct net_device *dev = q->dev_queue->dev;
860 int err;
861
862 down_write(&block->cb_lock);
863
864 /* If tc offload feature is disabled and the block we try to bind
865 * to already has some offloaded filters, forbid to bind.
866 */
867 if (dev->netdev_ops->ndo_setup_tc &&
868 !tc_can_offload(dev) &&
869 tcf_block_offload_in_use(block)) {
870 NL_SET_ERR_MSG(extack, "Bind to offloaded block failed as dev has offload disabled");
871 err = -EOPNOTSUPP;
872 goto err_unlock;
873 }
874
875 err = tcf_block_offload_cmd(block, dev, q, ei, FLOW_BLOCK_BIND, extack);
876 if (err == -EOPNOTSUPP)
877 goto no_offload_dev_inc;
878 if (err)
879 goto err_unlock;
880
881 up_write(&block->cb_lock);
882 return 0;
883
884no_offload_dev_inc:
885 if (tcf_block_offload_in_use(block))
886 goto err_unlock;
887
888 err = 0;
889 block->nooffloaddevcnt++;
890err_unlock:
891 up_write(&block->cb_lock);
892 return err;
893}
894
895static void tcf_block_offload_unbind(struct tcf_block *block, struct Qdisc *q,
896 struct tcf_block_ext_info *ei)
897{
898 struct net_device *dev = q->dev_queue->dev;
899 int err;
900
901 down_write(&block->cb_lock);
902 err = tcf_block_offload_cmd(block, dev, q, ei, FLOW_BLOCK_UNBIND, NULL);
903 if (err == -EOPNOTSUPP)
904 goto no_offload_dev_dec;
905 up_write(&block->cb_lock);
906 return;
907
908no_offload_dev_dec:
909 WARN_ON(block->nooffloaddevcnt-- == 0);
910 up_write(&block->cb_lock);
911}
912
913static int
914tcf_chain0_head_change_cb_add(struct tcf_block *block,
915 struct tcf_block_ext_info *ei,
916 struct netlink_ext_ack *extack)
917{
918 struct tcf_filter_chain_list_item *item;
919 struct tcf_chain *chain0;
920
921 item = kmalloc(sizeof(*item), GFP_KERNEL);
922 if (!item) {
923 NL_SET_ERR_MSG(extack, "Memory allocation for head change callback item failed");
924 return -ENOMEM;
925 }
926 item->chain_head_change = ei->chain_head_change;
927 item->chain_head_change_priv = ei->chain_head_change_priv;
928
929 mutex_lock(&block->lock);
930 chain0 = block->chain0.chain;
931 if (chain0)
932 tcf_chain_hold(chain0);
933 else
934 list_add(&item->list, &block->chain0.filter_chain_list);
935 mutex_unlock(&block->lock);
936
937 if (chain0) {
938 struct tcf_proto *tp_head;
939
940 mutex_lock(&chain0->filter_chain_lock);
941
942 tp_head = tcf_chain_dereference(chain0->filter_chain, chain0);
943 if (tp_head)
944 tcf_chain_head_change_item(item, tp_head);
945
946 mutex_lock(&block->lock);
947 list_add(&item->list, &block->chain0.filter_chain_list);
948 mutex_unlock(&block->lock);
949
950 mutex_unlock(&chain0->filter_chain_lock);
951 tcf_chain_put(chain0);
952 }
953
954 return 0;
955}
956
957static void
958tcf_chain0_head_change_cb_del(struct tcf_block *block,
959 struct tcf_block_ext_info *ei)
960{
961 struct tcf_filter_chain_list_item *item;
962
963 mutex_lock(&block->lock);
964 list_for_each_entry(item, &block->chain0.filter_chain_list, list) {
965 if ((!ei->chain_head_change && !ei->chain_head_change_priv) ||
966 (item->chain_head_change == ei->chain_head_change &&
967 item->chain_head_change_priv == ei->chain_head_change_priv)) {
968 if (block->chain0.chain)
969 tcf_chain_head_change_item(item, NULL);
970 list_del(&item->list);
971 mutex_unlock(&block->lock);
972
973 kfree(item);
974 return;
975 }
976 }
977 mutex_unlock(&block->lock);
978 WARN_ON(1);
979}
980
981struct tcf_net {
982 spinlock_t idr_lock; /* Protects idr */
983 struct idr idr;
984};
985
986static unsigned int tcf_net_id;
987
988static int tcf_block_insert(struct tcf_block *block, struct net *net,
989 struct netlink_ext_ack *extack)
990{
991 struct tcf_net *tn = net_generic(net, tcf_net_id);
992 int err;
993
994 idr_preload(GFP_KERNEL);
995 spin_lock(&tn->idr_lock);
996 err = idr_alloc_u32(&tn->idr, block, &block->index, block->index,
997 GFP_NOWAIT);
998 spin_unlock(&tn->idr_lock);
999 idr_preload_end();
1000
1001 return err;
1002}
1003
1004static void tcf_block_remove(struct tcf_block *block, struct net *net)
1005{
1006 struct tcf_net *tn = net_generic(net, tcf_net_id);
1007
1008 spin_lock(&tn->idr_lock);
1009 idr_remove(&tn->idr, block->index);
1010 spin_unlock(&tn->idr_lock);
1011}
1012
1013static struct tcf_block *tcf_block_create(struct net *net, struct Qdisc *q,
1014 u32 block_index,
1015 struct netlink_ext_ack *extack)
1016{
1017 struct tcf_block *block;
1018
1019 block = kzalloc(sizeof(*block), GFP_KERNEL);
1020 if (!block) {
1021 NL_SET_ERR_MSG(extack, "Memory allocation for block failed");
1022 return ERR_PTR(-ENOMEM);
1023 }
1024 mutex_init(&block->lock);
1025 mutex_init(&block->proto_destroy_lock);
1026 init_rwsem(&block->cb_lock);
1027 flow_block_init(&block->flow_block);
1028 INIT_LIST_HEAD(&block->chain_list);
1029 INIT_LIST_HEAD(&block->owner_list);
1030 INIT_LIST_HEAD(&block->chain0.filter_chain_list);
1031
1032 refcount_set(&block->refcnt, 1);
1033 block->net = net;
1034 block->index = block_index;
1035 xa_init(&block->ports);
1036
1037 /* Don't store q pointer for blocks which are shared */
1038 if (!tcf_block_shared(block))
1039 block->q = q;
1040 return block;
1041}
1042
1043struct tcf_block *tcf_block_lookup(struct net *net, u32 block_index)
1044{
1045 struct tcf_net *tn = net_generic(net, tcf_net_id);
1046
1047 return idr_find(&tn->idr, block_index);
1048}
1049EXPORT_SYMBOL(tcf_block_lookup);
1050
1051static struct tcf_block *tcf_block_refcnt_get(struct net *net, u32 block_index)
1052{
1053 struct tcf_block *block;
1054
1055 rcu_read_lock();
1056 block = tcf_block_lookup(net, block_index);
1057 if (block && !refcount_inc_not_zero(&block->refcnt))
1058 block = NULL;
1059 rcu_read_unlock();
1060
1061 return block;
1062}
1063
1064static struct tcf_chain *
1065__tcf_get_next_chain(struct tcf_block *block, struct tcf_chain *chain)
1066{
1067 mutex_lock(&block->lock);
1068 if (chain)
1069 chain = list_is_last(&chain->list, &block->chain_list) ?
1070 NULL : list_next_entry(chain, list);
1071 else
1072 chain = list_first_entry_or_null(&block->chain_list,
1073 struct tcf_chain, list);
1074
1075 /* skip all action-only chains */
1076 while (chain && tcf_chain_held_by_acts_only(chain))
1077 chain = list_is_last(&chain->list, &block->chain_list) ?
1078 NULL : list_next_entry(chain, list);
1079
1080 if (chain)
1081 tcf_chain_hold(chain);
1082 mutex_unlock(&block->lock);
1083
1084 return chain;
1085}
1086
1087/* Function to be used by all clients that want to iterate over all chains on
1088 * block. It properly obtains block->lock and takes reference to chain before
1089 * returning it. Users of this function must be tolerant to concurrent chain
1090 * insertion/deletion or ensure that no concurrent chain modification is
1091 * possible. Note that all netlink dump callbacks cannot guarantee to provide
1092 * consistent dump because rtnl lock is released each time skb is filled with
1093 * data and sent to user-space.
1094 */
1095
1096struct tcf_chain *
1097tcf_get_next_chain(struct tcf_block *block, struct tcf_chain *chain)
1098{
1099 struct tcf_chain *chain_next = __tcf_get_next_chain(block, chain);
1100
1101 if (chain)
1102 tcf_chain_put(chain);
1103
1104 return chain_next;
1105}
1106EXPORT_SYMBOL(tcf_get_next_chain);
1107
1108static struct tcf_proto *
1109__tcf_get_next_proto(struct tcf_chain *chain, struct tcf_proto *tp)
1110{
1111 u32 prio = 0;
1112
1113 ASSERT_RTNL();
1114 mutex_lock(&chain->filter_chain_lock);
1115
1116 if (!tp) {
1117 tp = tcf_chain_dereference(chain->filter_chain, chain);
1118 } else if (tcf_proto_is_deleting(tp)) {
1119 /* 'deleting' flag is set and chain->filter_chain_lock was
1120 * unlocked, which means next pointer could be invalid. Restart
1121 * search.
1122 */
1123 prio = tp->prio + 1;
1124 tp = tcf_chain_dereference(chain->filter_chain, chain);
1125
1126 for (; tp; tp = tcf_chain_dereference(tp->next, chain))
1127 if (!tp->deleting && tp->prio >= prio)
1128 break;
1129 } else {
1130 tp = tcf_chain_dereference(tp->next, chain);
1131 }
1132
1133 if (tp)
1134 tcf_proto_get(tp);
1135
1136 mutex_unlock(&chain->filter_chain_lock);
1137
1138 return tp;
1139}
1140
1141/* Function to be used by all clients that want to iterate over all tp's on
1142 * chain. Users of this function must be tolerant to concurrent tp
1143 * insertion/deletion or ensure that no concurrent chain modification is
1144 * possible. Note that all netlink dump callbacks cannot guarantee to provide
1145 * consistent dump because rtnl lock is released each time skb is filled with
1146 * data and sent to user-space.
1147 */
1148
1149struct tcf_proto *
1150tcf_get_next_proto(struct tcf_chain *chain, struct tcf_proto *tp)
1151{
1152 struct tcf_proto *tp_next = __tcf_get_next_proto(chain, tp);
1153
1154 if (tp)
1155 tcf_proto_put(tp, true, NULL);
1156
1157 return tp_next;
1158}
1159EXPORT_SYMBOL(tcf_get_next_proto);
1160
1161static void tcf_block_flush_all_chains(struct tcf_block *block, bool rtnl_held)
1162{
1163 struct tcf_chain *chain;
1164
1165 /* Last reference to block. At this point chains cannot be added or
1166 * removed concurrently.
1167 */
1168 for (chain = tcf_get_next_chain(block, NULL);
1169 chain;
1170 chain = tcf_get_next_chain(block, chain)) {
1171 tcf_chain_put_explicitly_created(chain);
1172 tcf_chain_flush(chain, rtnl_held);
1173 }
1174}
1175
1176/* Lookup Qdisc and increments its reference counter.
1177 * Set parent, if necessary.
1178 */
1179
1180static int __tcf_qdisc_find(struct net *net, struct Qdisc **q,
1181 u32 *parent, int ifindex, bool rtnl_held,
1182 struct netlink_ext_ack *extack)
1183{
1184 const struct Qdisc_class_ops *cops;
1185 struct net_device *dev;
1186 int err = 0;
1187
1188 if (ifindex == TCM_IFINDEX_MAGIC_BLOCK)
1189 return 0;
1190
1191 rcu_read_lock();
1192
1193 /* Find link */
1194 dev = dev_get_by_index_rcu(net, ifindex);
1195 if (!dev) {
1196 rcu_read_unlock();
1197 return -ENODEV;
1198 }
1199
1200 /* Find qdisc */
1201 if (!*parent) {
1202 *q = rcu_dereference(dev->qdisc);
1203 *parent = (*q)->handle;
1204 } else {
1205 *q = qdisc_lookup_rcu(dev, TC_H_MAJ(*parent));
1206 if (!*q) {
1207 NL_SET_ERR_MSG(extack, "Parent Qdisc doesn't exists");
1208 err = -EINVAL;
1209 goto errout_rcu;
1210 }
1211 }
1212
1213 *q = qdisc_refcount_inc_nz(*q);
1214 if (!*q) {
1215 NL_SET_ERR_MSG(extack, "Parent Qdisc doesn't exists");
1216 err = -EINVAL;
1217 goto errout_rcu;
1218 }
1219
1220 /* Is it classful? */
1221 cops = (*q)->ops->cl_ops;
1222 if (!cops) {
1223 NL_SET_ERR_MSG(extack, "Qdisc not classful");
1224 err = -EINVAL;
1225 goto errout_qdisc;
1226 }
1227
1228 if (!cops->tcf_block) {
1229 NL_SET_ERR_MSG(extack, "Class doesn't support blocks");
1230 err = -EOPNOTSUPP;
1231 goto errout_qdisc;
1232 }
1233
1234errout_rcu:
1235 /* At this point we know that qdisc is not noop_qdisc,
1236 * which means that qdisc holds a reference to net_device
1237 * and we hold a reference to qdisc, so it is safe to release
1238 * rcu read lock.
1239 */
1240 rcu_read_unlock();
1241 return err;
1242
1243errout_qdisc:
1244 rcu_read_unlock();
1245
1246 if (rtnl_held)
1247 qdisc_put(*q);
1248 else
1249 qdisc_put_unlocked(*q);
1250 *q = NULL;
1251
1252 return err;
1253}
1254
1255static int __tcf_qdisc_cl_find(struct Qdisc *q, u32 parent, unsigned long *cl,
1256 int ifindex, struct netlink_ext_ack *extack)
1257{
1258 if (ifindex == TCM_IFINDEX_MAGIC_BLOCK)
1259 return 0;
1260
1261 /* Do we search for filter, attached to class? */
1262 if (TC_H_MIN(parent)) {
1263 const struct Qdisc_class_ops *cops = q->ops->cl_ops;
1264
1265 *cl = cops->find(q, parent);
1266 if (*cl == 0) {
1267 NL_SET_ERR_MSG(extack, "Specified class doesn't exist");
1268 return -ENOENT;
1269 }
1270 }
1271
1272 return 0;
1273}
1274
1275static struct tcf_block *__tcf_block_find(struct net *net, struct Qdisc *q,
1276 unsigned long cl, int ifindex,
1277 u32 block_index,
1278 struct netlink_ext_ack *extack)
1279{
1280 struct tcf_block *block;
1281
1282 if (ifindex == TCM_IFINDEX_MAGIC_BLOCK) {
1283 block = tcf_block_refcnt_get(net, block_index);
1284 if (!block) {
1285 NL_SET_ERR_MSG(extack, "Block of given index was not found");
1286 return ERR_PTR(-EINVAL);
1287 }
1288 } else {
1289 const struct Qdisc_class_ops *cops = q->ops->cl_ops;
1290
1291 block = cops->tcf_block(q, cl, extack);
1292 if (!block)
1293 return ERR_PTR(-EINVAL);
1294
1295 if (tcf_block_shared(block)) {
1296 NL_SET_ERR_MSG(extack, "This filter block is shared. Please use the block index to manipulate the filters");
1297 return ERR_PTR(-EOPNOTSUPP);
1298 }
1299
1300 /* Always take reference to block in order to support execution
1301 * of rules update path of cls API without rtnl lock. Caller
1302 * must release block when it is finished using it. 'if' block
1303 * of this conditional obtain reference to block by calling
1304 * tcf_block_refcnt_get().
1305 */
1306 refcount_inc(&block->refcnt);
1307 }
1308
1309 return block;
1310}
1311
1312static void __tcf_block_put(struct tcf_block *block, struct Qdisc *q,
1313 struct tcf_block_ext_info *ei, bool rtnl_held)
1314{
1315 if (refcount_dec_and_mutex_lock(&block->refcnt, &block->lock)) {
1316 /* Flushing/putting all chains will cause the block to be
1317 * deallocated when last chain is freed. However, if chain_list
1318 * is empty, block has to be manually deallocated. After block
1319 * reference counter reached 0, it is no longer possible to
1320 * increment it or add new chains to block.
1321 */
1322 bool free_block = list_empty(&block->chain_list);
1323
1324 mutex_unlock(&block->lock);
1325 if (tcf_block_shared(block))
1326 tcf_block_remove(block, block->net);
1327
1328 if (q)
1329 tcf_block_offload_unbind(block, q, ei);
1330
1331 if (free_block)
1332 tcf_block_destroy(block);
1333 else
1334 tcf_block_flush_all_chains(block, rtnl_held);
1335 } else if (q) {
1336 tcf_block_offload_unbind(block, q, ei);
1337 }
1338}
1339
1340static void tcf_block_refcnt_put(struct tcf_block *block, bool rtnl_held)
1341{
1342 __tcf_block_put(block, NULL, NULL, rtnl_held);
1343}
1344
1345/* Find tcf block.
1346 * Set q, parent, cl when appropriate.
1347 */
1348
1349static struct tcf_block *tcf_block_find(struct net *net, struct Qdisc **q,
1350 u32 *parent, unsigned long *cl,
1351 int ifindex, u32 block_index,
1352 struct netlink_ext_ack *extack)
1353{
1354 struct tcf_block *block;
1355 int err = 0;
1356
1357 ASSERT_RTNL();
1358
1359 err = __tcf_qdisc_find(net, q, parent, ifindex, true, extack);
1360 if (err)
1361 goto errout;
1362
1363 err = __tcf_qdisc_cl_find(*q, *parent, cl, ifindex, extack);
1364 if (err)
1365 goto errout_qdisc;
1366
1367 block = __tcf_block_find(net, *q, *cl, ifindex, block_index, extack);
1368 if (IS_ERR(block)) {
1369 err = PTR_ERR(block);
1370 goto errout_qdisc;
1371 }
1372
1373 return block;
1374
1375errout_qdisc:
1376 if (*q)
1377 qdisc_put(*q);
1378errout:
1379 *q = NULL;
1380 return ERR_PTR(err);
1381}
1382
1383static void tcf_block_release(struct Qdisc *q, struct tcf_block *block,
1384 bool rtnl_held)
1385{
1386 if (!IS_ERR_OR_NULL(block))
1387 tcf_block_refcnt_put(block, rtnl_held);
1388
1389 if (q) {
1390 if (rtnl_held)
1391 qdisc_put(q);
1392 else
1393 qdisc_put_unlocked(q);
1394 }
1395}
1396
1397struct tcf_block_owner_item {
1398 struct list_head list;
1399 struct Qdisc *q;
1400 enum flow_block_binder_type binder_type;
1401};
1402
1403static void
1404tcf_block_owner_netif_keep_dst(struct tcf_block *block,
1405 struct Qdisc *q,
1406 enum flow_block_binder_type binder_type)
1407{
1408 if (block->keep_dst &&
1409 binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS &&
1410 binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS)
1411 netif_keep_dst(qdisc_dev(q));
1412}
1413
1414void tcf_block_netif_keep_dst(struct tcf_block *block)
1415{
1416 struct tcf_block_owner_item *item;
1417
1418 block->keep_dst = true;
1419 list_for_each_entry(item, &block->owner_list, list)
1420 tcf_block_owner_netif_keep_dst(block, item->q,
1421 item->binder_type);
1422}
1423EXPORT_SYMBOL(tcf_block_netif_keep_dst);
1424
1425static int tcf_block_owner_add(struct tcf_block *block,
1426 struct Qdisc *q,
1427 enum flow_block_binder_type binder_type)
1428{
1429 struct tcf_block_owner_item *item;
1430
1431 item = kmalloc(sizeof(*item), GFP_KERNEL);
1432 if (!item)
1433 return -ENOMEM;
1434 item->q = q;
1435 item->binder_type = binder_type;
1436 list_add(&item->list, &block->owner_list);
1437 return 0;
1438}
1439
1440static void tcf_block_owner_del(struct tcf_block *block,
1441 struct Qdisc *q,
1442 enum flow_block_binder_type binder_type)
1443{
1444 struct tcf_block_owner_item *item;
1445
1446 list_for_each_entry(item, &block->owner_list, list) {
1447 if (item->q == q && item->binder_type == binder_type) {
1448 list_del(&item->list);
1449 kfree(item);
1450 return;
1451 }
1452 }
1453 WARN_ON(1);
1454}
1455
1456static bool tcf_block_tracks_dev(struct tcf_block *block,
1457 struct tcf_block_ext_info *ei)
1458{
1459 return tcf_block_shared(block) &&
1460 (ei->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS ||
1461 ei->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS);
1462}
1463
1464int tcf_block_get_ext(struct tcf_block **p_block, struct Qdisc *q,
1465 struct tcf_block_ext_info *ei,
1466 struct netlink_ext_ack *extack)
1467{
1468 struct net_device *dev = qdisc_dev(q);
1469 struct net *net = qdisc_net(q);
1470 struct tcf_block *block = NULL;
1471 int err;
1472
1473 if (ei->block_index)
1474 /* block_index not 0 means the shared block is requested */
1475 block = tcf_block_refcnt_get(net, ei->block_index);
1476
1477 if (!block) {
1478 block = tcf_block_create(net, q, ei->block_index, extack);
1479 if (IS_ERR(block))
1480 return PTR_ERR(block);
1481 if (tcf_block_shared(block)) {
1482 err = tcf_block_insert(block, net, extack);
1483 if (err)
1484 goto err_block_insert;
1485 }
1486 }
1487
1488 err = tcf_block_owner_add(block, q, ei->binder_type);
1489 if (err)
1490 goto err_block_owner_add;
1491
1492 tcf_block_owner_netif_keep_dst(block, q, ei->binder_type);
1493
1494 err = tcf_chain0_head_change_cb_add(block, ei, extack);
1495 if (err)
1496 goto err_chain0_head_change_cb_add;
1497
1498 err = tcf_block_offload_bind(block, q, ei, extack);
1499 if (err)
1500 goto err_block_offload_bind;
1501
1502 if (tcf_block_tracks_dev(block, ei)) {
1503 err = xa_insert(&block->ports, dev->ifindex, dev, GFP_KERNEL);
1504 if (err) {
1505 NL_SET_ERR_MSG(extack, "block dev insert failed");
1506 goto err_dev_insert;
1507 }
1508 }
1509
1510 *p_block = block;
1511 return 0;
1512
1513err_dev_insert:
1514 tcf_block_offload_unbind(block, q, ei);
1515err_block_offload_bind:
1516 tcf_chain0_head_change_cb_del(block, ei);
1517err_chain0_head_change_cb_add:
1518 tcf_block_owner_del(block, q, ei->binder_type);
1519err_block_owner_add:
1520err_block_insert:
1521 tcf_block_refcnt_put(block, true);
1522 return err;
1523}
1524EXPORT_SYMBOL(tcf_block_get_ext);
1525
1526static void tcf_chain_head_change_dflt(struct tcf_proto *tp_head, void *priv)
1527{
1528 struct tcf_proto __rcu **p_filter_chain = priv;
1529
1530 rcu_assign_pointer(*p_filter_chain, tp_head);
1531}
1532
1533int tcf_block_get(struct tcf_block **p_block,
1534 struct tcf_proto __rcu **p_filter_chain, struct Qdisc *q,
1535 struct netlink_ext_ack *extack)
1536{
1537 struct tcf_block_ext_info ei = {
1538 .chain_head_change = tcf_chain_head_change_dflt,
1539 .chain_head_change_priv = p_filter_chain,
1540 };
1541
1542 WARN_ON(!p_filter_chain);
1543 return tcf_block_get_ext(p_block, q, &ei, extack);
1544}
1545EXPORT_SYMBOL(tcf_block_get);
1546
1547/* XXX: Standalone actions are not allowed to jump to any chain, and bound
1548 * actions should be all removed after flushing.
1549 */
1550void tcf_block_put_ext(struct tcf_block *block, struct Qdisc *q,
1551 struct tcf_block_ext_info *ei)
1552{
1553 struct net_device *dev = qdisc_dev(q);
1554
1555 if (!block)
1556 return;
1557 if (tcf_block_tracks_dev(block, ei))
1558 xa_erase(&block->ports, dev->ifindex);
1559 tcf_chain0_head_change_cb_del(block, ei);
1560 tcf_block_owner_del(block, q, ei->binder_type);
1561
1562 __tcf_block_put(block, q, ei, true);
1563}
1564EXPORT_SYMBOL(tcf_block_put_ext);
1565
1566void tcf_block_put(struct tcf_block *block)
1567{
1568 struct tcf_block_ext_info ei = {0, };
1569
1570 if (!block)
1571 return;
1572 tcf_block_put_ext(block, block->q, &ei);
1573}
1574
1575EXPORT_SYMBOL(tcf_block_put);
1576
1577static int
1578tcf_block_playback_offloads(struct tcf_block *block, flow_setup_cb_t *cb,
1579 void *cb_priv, bool add, bool offload_in_use,
1580 struct netlink_ext_ack *extack)
1581{
1582 struct tcf_chain *chain, *chain_prev;
1583 struct tcf_proto *tp, *tp_prev;
1584 int err;
1585
1586 lockdep_assert_held(&block->cb_lock);
1587
1588 for (chain = __tcf_get_next_chain(block, NULL);
1589 chain;
1590 chain_prev = chain,
1591 chain = __tcf_get_next_chain(block, chain),
1592 tcf_chain_put(chain_prev)) {
1593 if (chain->tmplt_ops && add)
1594 chain->tmplt_ops->tmplt_reoffload(chain, true, cb,
1595 cb_priv);
1596 for (tp = __tcf_get_next_proto(chain, NULL); tp;
1597 tp_prev = tp,
1598 tp = __tcf_get_next_proto(chain, tp),
1599 tcf_proto_put(tp_prev, true, NULL)) {
1600 if (tp->ops->reoffload) {
1601 err = tp->ops->reoffload(tp, add, cb, cb_priv,
1602 extack);
1603 if (err && add)
1604 goto err_playback_remove;
1605 } else if (add && offload_in_use) {
1606 err = -EOPNOTSUPP;
1607 NL_SET_ERR_MSG(extack, "Filter HW offload failed - classifier without re-offloading support");
1608 goto err_playback_remove;
1609 }
1610 }
1611 if (chain->tmplt_ops && !add)
1612 chain->tmplt_ops->tmplt_reoffload(chain, false, cb,
1613 cb_priv);
1614 }
1615
1616 return 0;
1617
1618err_playback_remove:
1619 tcf_proto_put(tp, true, NULL);
1620 tcf_chain_put(chain);
1621 tcf_block_playback_offloads(block, cb, cb_priv, false, offload_in_use,
1622 extack);
1623 return err;
1624}
1625
1626static int tcf_block_bind(struct tcf_block *block,
1627 struct flow_block_offload *bo)
1628{
1629 struct flow_block_cb *block_cb, *next;
1630 int err, i = 0;
1631
1632 lockdep_assert_held(&block->cb_lock);
1633
1634 list_for_each_entry(block_cb, &bo->cb_list, list) {
1635 err = tcf_block_playback_offloads(block, block_cb->cb,
1636 block_cb->cb_priv, true,
1637 tcf_block_offload_in_use(block),
1638 bo->extack);
1639 if (err)
1640 goto err_unroll;
1641 if (!bo->unlocked_driver_cb)
1642 block->lockeddevcnt++;
1643
1644 i++;
1645 }
1646 list_splice(&bo->cb_list, &block->flow_block.cb_list);
1647
1648 return 0;
1649
1650err_unroll:
1651 list_for_each_entry_safe(block_cb, next, &bo->cb_list, list) {
1652 list_del(&block_cb->driver_list);
1653 if (i-- > 0) {
1654 list_del(&block_cb->list);
1655 tcf_block_playback_offloads(block, block_cb->cb,
1656 block_cb->cb_priv, false,
1657 tcf_block_offload_in_use(block),
1658 NULL);
1659 if (!bo->unlocked_driver_cb)
1660 block->lockeddevcnt--;
1661 }
1662 flow_block_cb_free(block_cb);
1663 }
1664
1665 return err;
1666}
1667
1668static void tcf_block_unbind(struct tcf_block *block,
1669 struct flow_block_offload *bo)
1670{
1671 struct flow_block_cb *block_cb, *next;
1672
1673 lockdep_assert_held(&block->cb_lock);
1674
1675 list_for_each_entry_safe(block_cb, next, &bo->cb_list, list) {
1676 tcf_block_playback_offloads(block, block_cb->cb,
1677 block_cb->cb_priv, false,
1678 tcf_block_offload_in_use(block),
1679 NULL);
1680 list_del(&block_cb->list);
1681 flow_block_cb_free(block_cb);
1682 if (!bo->unlocked_driver_cb)
1683 block->lockeddevcnt--;
1684 }
1685}
1686
1687static int tcf_block_setup(struct tcf_block *block,
1688 struct flow_block_offload *bo)
1689{
1690 int err;
1691
1692 switch (bo->command) {
1693 case FLOW_BLOCK_BIND:
1694 err = tcf_block_bind(block, bo);
1695 break;
1696 case FLOW_BLOCK_UNBIND:
1697 err = 0;
1698 tcf_block_unbind(block, bo);
1699 break;
1700 default:
1701 WARN_ON_ONCE(1);
1702 err = -EOPNOTSUPP;
1703 }
1704
1705 return err;
1706}
1707
1708/* Main classifier routine: scans classifier chain attached
1709 * to this qdisc, (optionally) tests for protocol and asks
1710 * specific classifiers.
1711 */
1712static inline int __tcf_classify(struct sk_buff *skb,
1713 const struct tcf_proto *tp,
1714 const struct tcf_proto *orig_tp,
1715 struct tcf_result *res,
1716 bool compat_mode,
1717 struct tcf_exts_miss_cookie_node *n,
1718 int act_index,
1719 u32 *last_executed_chain)
1720{
1721#ifdef CONFIG_NET_CLS_ACT
1722 const int max_reclassify_loop = 16;
1723 const struct tcf_proto *first_tp;
1724 int limit = 0;
1725
1726reclassify:
1727#endif
1728 for (; tp; tp = rcu_dereference_bh(tp->next)) {
1729 __be16 protocol = skb_protocol(skb, false);
1730 int err = 0;
1731
1732 if (n) {
1733 struct tcf_exts *exts;
1734
1735 if (n->tp_prio != tp->prio)
1736 continue;
1737
1738 /* We re-lookup the tp and chain based on index instead
1739 * of having hard refs and locks to them, so do a sanity
1740 * check if any of tp,chain,exts was replaced by the
1741 * time we got here with a cookie from hardware.
1742 */
1743 if (unlikely(n->tp != tp || n->tp->chain != n->chain ||
1744 !tp->ops->get_exts)) {
1745 tcf_set_drop_reason(skb,
1746 SKB_DROP_REASON_TC_COOKIE_ERROR);
1747 return TC_ACT_SHOT;
1748 }
1749
1750 exts = tp->ops->get_exts(tp, n->handle);
1751 if (unlikely(!exts || n->exts != exts)) {
1752 tcf_set_drop_reason(skb,
1753 SKB_DROP_REASON_TC_COOKIE_ERROR);
1754 return TC_ACT_SHOT;
1755 }
1756
1757 n = NULL;
1758 err = tcf_exts_exec_ex(skb, exts, act_index, res);
1759 } else {
1760 if (tp->protocol != protocol &&
1761 tp->protocol != htons(ETH_P_ALL))
1762 continue;
1763
1764 err = tc_classify(skb, tp, res);
1765 }
1766#ifdef CONFIG_NET_CLS_ACT
1767 if (unlikely(err == TC_ACT_RECLASSIFY && !compat_mode)) {
1768 first_tp = orig_tp;
1769 *last_executed_chain = first_tp->chain->index;
1770 goto reset;
1771 } else if (unlikely(TC_ACT_EXT_CMP(err, TC_ACT_GOTO_CHAIN))) {
1772 first_tp = res->goto_tp;
1773 *last_executed_chain = err & TC_ACT_EXT_VAL_MASK;
1774 goto reset;
1775 }
1776#endif
1777 if (err >= 0)
1778 return err;
1779 }
1780
1781 if (unlikely(n)) {
1782 tcf_set_drop_reason(skb,
1783 SKB_DROP_REASON_TC_COOKIE_ERROR);
1784 return TC_ACT_SHOT;
1785 }
1786
1787 return TC_ACT_UNSPEC; /* signal: continue lookup */
1788#ifdef CONFIG_NET_CLS_ACT
1789reset:
1790 if (unlikely(limit++ >= max_reclassify_loop)) {
1791 net_notice_ratelimited("%u: reclassify loop, rule prio %u, protocol %02x\n",
1792 tp->chain->block->index,
1793 tp->prio & 0xffff,
1794 ntohs(tp->protocol));
1795 tcf_set_drop_reason(skb,
1796 SKB_DROP_REASON_TC_RECLASSIFY_LOOP);
1797 return TC_ACT_SHOT;
1798 }
1799
1800 tp = first_tp;
1801 goto reclassify;
1802#endif
1803}
1804
1805int tcf_classify(struct sk_buff *skb,
1806 const struct tcf_block *block,
1807 const struct tcf_proto *tp,
1808 struct tcf_result *res, bool compat_mode)
1809{
1810#if !IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
1811 u32 last_executed_chain = 0;
1812
1813 return __tcf_classify(skb, tp, tp, res, compat_mode, NULL, 0,
1814 &last_executed_chain);
1815#else
1816 u32 last_executed_chain = tp ? tp->chain->index : 0;
1817 struct tcf_exts_miss_cookie_node *n = NULL;
1818 const struct tcf_proto *orig_tp = tp;
1819 struct tc_skb_ext *ext;
1820 int act_index = 0;
1821 int ret;
1822
1823 if (block) {
1824 ext = skb_ext_find(skb, TC_SKB_EXT);
1825
1826 if (ext && (ext->chain || ext->act_miss)) {
1827 struct tcf_chain *fchain;
1828 u32 chain;
1829
1830 if (ext->act_miss) {
1831 n = tcf_exts_miss_cookie_lookup(ext->act_miss_cookie,
1832 &act_index);
1833 if (!n) {
1834 tcf_set_drop_reason(skb,
1835 SKB_DROP_REASON_TC_COOKIE_ERROR);
1836 return TC_ACT_SHOT;
1837 }
1838
1839 chain = n->chain_index;
1840 } else {
1841 chain = ext->chain;
1842 }
1843
1844 fchain = tcf_chain_lookup_rcu(block, chain);
1845 if (!fchain) {
1846 tcf_set_drop_reason(skb,
1847 SKB_DROP_REASON_TC_CHAIN_NOTFOUND);
1848
1849 return TC_ACT_SHOT;
1850 }
1851
1852 /* Consume, so cloned/redirect skbs won't inherit ext */
1853 skb_ext_del(skb, TC_SKB_EXT);
1854
1855 tp = rcu_dereference_bh(fchain->filter_chain);
1856 last_executed_chain = fchain->index;
1857 }
1858 }
1859
1860 ret = __tcf_classify(skb, tp, orig_tp, res, compat_mode, n, act_index,
1861 &last_executed_chain);
1862
1863 if (tc_skb_ext_tc_enabled()) {
1864 /* If we missed on some chain */
1865 if (ret == TC_ACT_UNSPEC && last_executed_chain) {
1866 struct tc_skb_cb *cb = tc_skb_cb(skb);
1867
1868 ext = tc_skb_ext_alloc(skb);
1869 if (WARN_ON_ONCE(!ext)) {
1870 tcf_set_drop_reason(skb, SKB_DROP_REASON_NOMEM);
1871 return TC_ACT_SHOT;
1872 }
1873 ext->chain = last_executed_chain;
1874 ext->mru = cb->mru;
1875 ext->post_ct = cb->post_ct;
1876 ext->post_ct_snat = cb->post_ct_snat;
1877 ext->post_ct_dnat = cb->post_ct_dnat;
1878 ext->zone = cb->zone;
1879 }
1880 }
1881
1882 return ret;
1883#endif
1884}
1885EXPORT_SYMBOL(tcf_classify);
1886
1887struct tcf_chain_info {
1888 struct tcf_proto __rcu **pprev;
1889 struct tcf_proto __rcu *next;
1890};
1891
1892static struct tcf_proto *tcf_chain_tp_prev(struct tcf_chain *chain,
1893 struct tcf_chain_info *chain_info)
1894{
1895 return tcf_chain_dereference(*chain_info->pprev, chain);
1896}
1897
1898static int tcf_chain_tp_insert(struct tcf_chain *chain,
1899 struct tcf_chain_info *chain_info,
1900 struct tcf_proto *tp)
1901{
1902 if (chain->flushing)
1903 return -EAGAIN;
1904
1905 RCU_INIT_POINTER(tp->next, tcf_chain_tp_prev(chain, chain_info));
1906 if (*chain_info->pprev == chain->filter_chain)
1907 tcf_chain0_head_change(chain, tp);
1908 tcf_proto_get(tp);
1909 rcu_assign_pointer(*chain_info->pprev, tp);
1910
1911 return 0;
1912}
1913
1914static void tcf_chain_tp_remove(struct tcf_chain *chain,
1915 struct tcf_chain_info *chain_info,
1916 struct tcf_proto *tp)
1917{
1918 struct tcf_proto *next = tcf_chain_dereference(chain_info->next, chain);
1919
1920 tcf_proto_mark_delete(tp);
1921 if (tp == chain->filter_chain)
1922 tcf_chain0_head_change(chain, next);
1923 RCU_INIT_POINTER(*chain_info->pprev, next);
1924}
1925
1926static struct tcf_proto *tcf_chain_tp_find(struct tcf_chain *chain,
1927 struct tcf_chain_info *chain_info,
1928 u32 protocol, u32 prio,
1929 bool prio_allocate,
1930 struct netlink_ext_ack *extack);
1931
1932/* Try to insert new proto.
1933 * If proto with specified priority already exists, free new proto
1934 * and return existing one.
1935 */
1936
1937static struct tcf_proto *tcf_chain_tp_insert_unique(struct tcf_chain *chain,
1938 struct tcf_proto *tp_new,
1939 u32 protocol, u32 prio,
1940 bool rtnl_held)
1941{
1942 struct tcf_chain_info chain_info;
1943 struct tcf_proto *tp;
1944 int err = 0;
1945
1946 mutex_lock(&chain->filter_chain_lock);
1947
1948 if (tcf_proto_exists_destroying(chain, tp_new)) {
1949 mutex_unlock(&chain->filter_chain_lock);
1950 tcf_proto_destroy(tp_new, rtnl_held, false, NULL);
1951 return ERR_PTR(-EAGAIN);
1952 }
1953
1954 tp = tcf_chain_tp_find(chain, &chain_info, protocol, prio, false, NULL);
1955 if (!tp)
1956 err = tcf_chain_tp_insert(chain, &chain_info, tp_new);
1957 mutex_unlock(&chain->filter_chain_lock);
1958
1959 if (tp) {
1960 tcf_proto_destroy(tp_new, rtnl_held, false, NULL);
1961 tp_new = tp;
1962 } else if (err) {
1963 tcf_proto_destroy(tp_new, rtnl_held, false, NULL);
1964 tp_new = ERR_PTR(err);
1965 }
1966
1967 return tp_new;
1968}
1969
1970static void tcf_chain_tp_delete_empty(struct tcf_chain *chain,
1971 struct tcf_proto *tp, bool rtnl_held,
1972 struct netlink_ext_ack *extack)
1973{
1974 struct tcf_chain_info chain_info;
1975 struct tcf_proto *tp_iter;
1976 struct tcf_proto **pprev;
1977 struct tcf_proto *next;
1978
1979 mutex_lock(&chain->filter_chain_lock);
1980
1981 /* Atomically find and remove tp from chain. */
1982 for (pprev = &chain->filter_chain;
1983 (tp_iter = tcf_chain_dereference(*pprev, chain));
1984 pprev = &tp_iter->next) {
1985 if (tp_iter == tp) {
1986 chain_info.pprev = pprev;
1987 chain_info.next = tp_iter->next;
1988 WARN_ON(tp_iter->deleting);
1989 break;
1990 }
1991 }
1992 /* Verify that tp still exists and no new filters were inserted
1993 * concurrently.
1994 * Mark tp for deletion if it is empty.
1995 */
1996 if (!tp_iter || !tcf_proto_check_delete(tp)) {
1997 mutex_unlock(&chain->filter_chain_lock);
1998 return;
1999 }
2000
2001 tcf_proto_signal_destroying(chain, tp);
2002 next = tcf_chain_dereference(chain_info.next, chain);
2003 if (tp == chain->filter_chain)
2004 tcf_chain0_head_change(chain, next);
2005 RCU_INIT_POINTER(*chain_info.pprev, next);
2006 mutex_unlock(&chain->filter_chain_lock);
2007
2008 tcf_proto_put(tp, rtnl_held, extack);
2009}
2010
2011static struct tcf_proto *tcf_chain_tp_find(struct tcf_chain *chain,
2012 struct tcf_chain_info *chain_info,
2013 u32 protocol, u32 prio,
2014 bool prio_allocate,
2015 struct netlink_ext_ack *extack)
2016{
2017 struct tcf_proto **pprev;
2018 struct tcf_proto *tp;
2019
2020 /* Check the chain for existence of proto-tcf with this priority */
2021 for (pprev = &chain->filter_chain;
2022 (tp = tcf_chain_dereference(*pprev, chain));
2023 pprev = &tp->next) {
2024 if (tp->prio >= prio) {
2025 if (tp->prio == prio) {
2026 if (prio_allocate) {
2027 NL_SET_ERR_MSG(extack, "Lowest ID from auto-alloc range already in use");
2028 return ERR_PTR(-ENOSPC);
2029 }
2030 if (tp->protocol != protocol && protocol) {
2031 NL_SET_ERR_MSG(extack, "Protocol mismatch for filter with specified priority");
2032 return ERR_PTR(-EINVAL);
2033 }
2034 } else {
2035 tp = NULL;
2036 }
2037 break;
2038 }
2039 }
2040 chain_info->pprev = pprev;
2041 if (tp) {
2042 chain_info->next = tp->next;
2043 tcf_proto_get(tp);
2044 } else {
2045 chain_info->next = NULL;
2046 }
2047 return tp;
2048}
2049
2050static int tcf_fill_node(struct net *net, struct sk_buff *skb,
2051 struct tcf_proto *tp, struct tcf_block *block,
2052 struct Qdisc *q, u32 parent, void *fh,
2053 u32 portid, u32 seq, u16 flags, int event,
2054 bool terse_dump, bool rtnl_held,
2055 struct netlink_ext_ack *extack)
2056{
2057 struct tcmsg *tcm;
2058 struct nlmsghdr *nlh;
2059 unsigned char *b = skb_tail_pointer(skb);
2060
2061 nlh = nlmsg_put(skb, portid, seq, event, sizeof(*tcm), flags);
2062 if (!nlh)
2063 goto out_nlmsg_trim;
2064 tcm = nlmsg_data(nlh);
2065 tcm->tcm_family = AF_UNSPEC;
2066 tcm->tcm__pad1 = 0;
2067 tcm->tcm__pad2 = 0;
2068 if (q) {
2069 tcm->tcm_ifindex = qdisc_dev(q)->ifindex;
2070 tcm->tcm_parent = parent;
2071 } else {
2072 tcm->tcm_ifindex = TCM_IFINDEX_MAGIC_BLOCK;
2073 tcm->tcm_block_index = block->index;
2074 }
2075 tcm->tcm_info = TC_H_MAKE(tp->prio, tp->protocol);
2076 if (nla_put_string(skb, TCA_KIND, tp->ops->kind))
2077 goto nla_put_failure;
2078 if (nla_put_u32(skb, TCA_CHAIN, tp->chain->index))
2079 goto nla_put_failure;
2080 if (!fh) {
2081 tcm->tcm_handle = 0;
2082 } else if (terse_dump) {
2083 if (tp->ops->terse_dump) {
2084 if (tp->ops->terse_dump(net, tp, fh, skb, tcm,
2085 rtnl_held) < 0)
2086 goto nla_put_failure;
2087 } else {
2088 goto cls_op_not_supp;
2089 }
2090 } else {
2091 if (tp->ops->dump &&
2092 tp->ops->dump(net, tp, fh, skb, tcm, rtnl_held) < 0)
2093 goto nla_put_failure;
2094 }
2095
2096 if (extack && extack->_msg &&
2097 nla_put_string(skb, TCA_EXT_WARN_MSG, extack->_msg))
2098 goto nla_put_failure;
2099
2100 nlh->nlmsg_len = skb_tail_pointer(skb) - b;
2101
2102 return skb->len;
2103
2104out_nlmsg_trim:
2105nla_put_failure:
2106cls_op_not_supp:
2107 nlmsg_trim(skb, b);
2108 return -1;
2109}
2110
2111static int tfilter_notify(struct net *net, struct sk_buff *oskb,
2112 struct nlmsghdr *n, struct tcf_proto *tp,
2113 struct tcf_block *block, struct Qdisc *q,
2114 u32 parent, void *fh, int event, bool unicast,
2115 bool rtnl_held, struct netlink_ext_ack *extack)
2116{
2117 struct sk_buff *skb;
2118 u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
2119 int err = 0;
2120
2121 if (!unicast && !rtnl_notify_needed(net, n->nlmsg_flags, RTNLGRP_TC))
2122 return 0;
2123
2124 skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
2125 if (!skb)
2126 return -ENOBUFS;
2127
2128 if (tcf_fill_node(net, skb, tp, block, q, parent, fh, portid,
2129 n->nlmsg_seq, n->nlmsg_flags, event,
2130 false, rtnl_held, extack) <= 0) {
2131 kfree_skb(skb);
2132 return -EINVAL;
2133 }
2134
2135 if (unicast)
2136 err = rtnl_unicast(skb, net, portid);
2137 else
2138 err = rtnetlink_send(skb, net, portid, RTNLGRP_TC,
2139 n->nlmsg_flags & NLM_F_ECHO);
2140 return err;
2141}
2142
2143static int tfilter_del_notify(struct net *net, struct sk_buff *oskb,
2144 struct nlmsghdr *n, struct tcf_proto *tp,
2145 struct tcf_block *block, struct Qdisc *q,
2146 u32 parent, void *fh, bool *last, bool rtnl_held,
2147 struct netlink_ext_ack *extack)
2148{
2149 struct sk_buff *skb;
2150 u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
2151 int err;
2152
2153 if (!rtnl_notify_needed(net, n->nlmsg_flags, RTNLGRP_TC))
2154 return tp->ops->delete(tp, fh, last, rtnl_held, extack);
2155
2156 skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
2157 if (!skb)
2158 return -ENOBUFS;
2159
2160 if (tcf_fill_node(net, skb, tp, block, q, parent, fh, portid,
2161 n->nlmsg_seq, n->nlmsg_flags, RTM_DELTFILTER,
2162 false, rtnl_held, extack) <= 0) {
2163 NL_SET_ERR_MSG(extack, "Failed to build del event notification");
2164 kfree_skb(skb);
2165 return -EINVAL;
2166 }
2167
2168 err = tp->ops->delete(tp, fh, last, rtnl_held, extack);
2169 if (err) {
2170 kfree_skb(skb);
2171 return err;
2172 }
2173
2174 err = rtnetlink_send(skb, net, portid, RTNLGRP_TC,
2175 n->nlmsg_flags & NLM_F_ECHO);
2176 if (err < 0)
2177 NL_SET_ERR_MSG(extack, "Failed to send filter delete notification");
2178
2179 return err;
2180}
2181
2182static void tfilter_notify_chain(struct net *net, struct sk_buff *oskb,
2183 struct tcf_block *block, struct Qdisc *q,
2184 u32 parent, struct nlmsghdr *n,
2185 struct tcf_chain *chain, int event,
2186 struct netlink_ext_ack *extack)
2187{
2188 struct tcf_proto *tp;
2189
2190 for (tp = tcf_get_next_proto(chain, NULL);
2191 tp; tp = tcf_get_next_proto(chain, tp))
2192 tfilter_notify(net, oskb, n, tp, block, q, parent, NULL,
2193 event, false, true, extack);
2194}
2195
2196static void tfilter_put(struct tcf_proto *tp, void *fh)
2197{
2198 if (tp->ops->put && fh)
2199 tp->ops->put(tp, fh);
2200}
2201
2202static bool is_qdisc_ingress(__u32 classid)
2203{
2204 return (TC_H_MIN(classid) == TC_H_MIN(TC_H_MIN_INGRESS));
2205}
2206
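/* Handle RTM_NEWTFILTER requests. For illustration, with iproute2 a
 * request like the following ends up here:
 *
 *	tc filter add dev eth0 ingress protocol ip prio 10 \
 *		flower dst_ip 192.0.2.1 action drop
 *
 * where "add" sets NLM_F_CREATE | NLM_F_EXCL, "replace" sets only
 * NLM_F_CREATE and "change" sets neither, selecting the create, replace
 * or modify behavior below.
 */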
2207static int tc_new_tfilter(struct sk_buff *skb, struct nlmsghdr *n,
2208 struct netlink_ext_ack *extack)
2209{
2210 struct net *net = sock_net(skb->sk);
2211 struct nlattr *tca[TCA_MAX + 1];
2212 char name[IFNAMSIZ];
2213 struct tcmsg *t;
2214 u32 protocol;
2215 u32 prio;
2216 bool prio_allocate;
2217 u32 parent;
2218 u32 chain_index;
2219 struct Qdisc *q;
2220 struct tcf_chain_info chain_info;
2221 struct tcf_chain *chain;
2222 struct tcf_block *block;
2223 struct tcf_proto *tp;
2224 unsigned long cl;
2225 void *fh;
2226 int err;
2227 int tp_created;
2228 bool rtnl_held = false;
2229 u32 flags;
2230
2231replay:
2232 tp_created = 0;
2233
2234 err = nlmsg_parse_deprecated(n, sizeof(*t), tca, TCA_MAX,
2235 rtm_tca_policy, extack);
2236 if (err < 0)
2237 return err;
2238
2239 t = nlmsg_data(n);
2240 protocol = TC_H_MIN(t->tcm_info);
2241 prio = TC_H_MAJ(t->tcm_info);
2242 prio_allocate = false;
2243 parent = t->tcm_parent;
2244 tp = NULL;
2245 cl = 0;
2246 block = NULL;
2247 q = NULL;
2248 chain = NULL;
2249 flags = 0;
2250
2251 if (prio == 0) {
2252 /* If no priority is provided by the user,
2253 * we allocate one.
2254 */
2255 if (n->nlmsg_flags & NLM_F_CREATE) {
2256 prio = TC_H_MAKE(0x80000000U, 0U);
2257 prio_allocate = true;
2258 } else {
2259 NL_SET_ERR_MSG(extack, "Invalid filter command with priority of zero");
2260 return -ENOENT;
2261 }
2262 }
2263
2264 /* Find head of filter chain. */
2265
2266 err = __tcf_qdisc_find(net, &q, &parent, t->tcm_ifindex, false, extack);
2267 if (err)
2268 return err;
2269
2270 if (tcf_proto_check_kind(tca[TCA_KIND], name)) {
2271 NL_SET_ERR_MSG(extack, "Specified TC filter name too long");
2272 err = -EINVAL;
2273 goto errout;
2274 }
2275
2276	/* Take the rtnl mutex if rtnl_held was set on a previous iteration, the
2277	 * block is shared (no qdisc was found), the qdisc is not unlocked, the
2278	 * classifier type is not specified, or the classifier is not unlocked.
2279	 */
2280 if (rtnl_held ||
2281 (q && !(q->ops->cl_ops->flags & QDISC_CLASS_OPS_DOIT_UNLOCKED)) ||
2282 !tcf_proto_is_unlocked(name)) {
2283 rtnl_held = true;
2284 rtnl_lock();
2285 }
2286
2287 err = __tcf_qdisc_cl_find(q, parent, &cl, t->tcm_ifindex, extack);
2288 if (err)
2289 goto errout;
2290
2291 block = __tcf_block_find(net, q, cl, t->tcm_ifindex, t->tcm_block_index,
2292 extack);
2293 if (IS_ERR(block)) {
2294 err = PTR_ERR(block);
2295 goto errout;
2296 }
2297 block->classid = parent;
2298
2299 chain_index = nla_get_u32_default(tca[TCA_CHAIN], 0);
2300 if (chain_index > TC_ACT_EXT_VAL_MASK) {
2301 NL_SET_ERR_MSG(extack, "Specified chain index exceeds upper limit");
2302 err = -EINVAL;
2303 goto errout;
2304 }
2305 chain = tcf_chain_get(block, chain_index, true);
2306 if (!chain) {
2307 NL_SET_ERR_MSG(extack, "Cannot create specified filter chain");
2308 err = -ENOMEM;
2309 goto errout;
2310 }
2311
2312 mutex_lock(&chain->filter_chain_lock);
2313 tp = tcf_chain_tp_find(chain, &chain_info, protocol,
2314 prio, prio_allocate, extack);
2315 if (IS_ERR(tp)) {
2316 err = PTR_ERR(tp);
2317 goto errout_locked;
2318 }
2319
2320 if (tp == NULL) {
2321 struct tcf_proto *tp_new = NULL;
2322
2323 if (chain->flushing) {
2324 err = -EAGAIN;
2325 goto errout_locked;
2326 }
2327
2328 /* Proto-tcf does not exist, create new one */
2329
2330 if (tca[TCA_KIND] == NULL || !protocol) {
2331 NL_SET_ERR_MSG(extack, "Filter kind and protocol must be specified");
2332 err = -EINVAL;
2333 goto errout_locked;
2334 }
2335
2336 if (!(n->nlmsg_flags & NLM_F_CREATE)) {
2337 NL_SET_ERR_MSG(extack, "Need both RTM_NEWTFILTER and NLM_F_CREATE to create a new filter");
2338 err = -ENOENT;
2339 goto errout_locked;
2340 }
2341
2342 if (prio_allocate)
2343 prio = tcf_auto_prio(tcf_chain_tp_prev(chain,
2344 &chain_info));
2345
2346 mutex_unlock(&chain->filter_chain_lock);
2347 tp_new = tcf_proto_create(name, protocol, prio, chain,
2348 rtnl_held, extack);
2349 if (IS_ERR(tp_new)) {
2350 err = PTR_ERR(tp_new);
2351 goto errout_tp;
2352 }
2353
2354 tp_created = 1;
2355 tp = tcf_chain_tp_insert_unique(chain, tp_new, protocol, prio,
2356 rtnl_held);
2357 if (IS_ERR(tp)) {
2358 err = PTR_ERR(tp);
2359 goto errout_tp;
2360 }
2361 } else {
2362 mutex_unlock(&chain->filter_chain_lock);
2363 }
2364
2365 if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], tp->ops->kind)) {
2366 NL_SET_ERR_MSG(extack, "Specified filter kind does not match existing one");
2367 err = -EINVAL;
2368 goto errout;
2369 }
2370
2371 fh = tp->ops->get(tp, t->tcm_handle);
2372
2373 if (!fh) {
2374 if (!(n->nlmsg_flags & NLM_F_CREATE)) {
2375 NL_SET_ERR_MSG(extack, "Need both RTM_NEWTFILTER and NLM_F_CREATE to create a new filter");
2376 err = -ENOENT;
2377 goto errout;
2378 }
2379 } else if (n->nlmsg_flags & NLM_F_EXCL) {
2380 tfilter_put(tp, fh);
2381 NL_SET_ERR_MSG(extack, "Filter already exists");
2382 err = -EEXIST;
2383 goto errout;
2384 }
2385
2386 if (chain->tmplt_ops && chain->tmplt_ops != tp->ops) {
2387 tfilter_put(tp, fh);
2388 NL_SET_ERR_MSG(extack, "Chain template is set to a different filter kind");
2389 err = -EINVAL;
2390 goto errout;
2391 }
2392
2393 if (!(n->nlmsg_flags & NLM_F_CREATE))
2394 flags |= TCA_ACT_FLAGS_REPLACE;
2395 if (!rtnl_held)
2396 flags |= TCA_ACT_FLAGS_NO_RTNL;
2397 if (is_qdisc_ingress(parent))
2398 flags |= TCA_ACT_FLAGS_AT_INGRESS;
2399 err = tp->ops->change(net, skb, tp, cl, t->tcm_handle, tca, &fh,
2400 flags, extack);
2401 if (err == 0) {
2402 tfilter_notify(net, skb, n, tp, block, q, parent, fh,
2403 RTM_NEWTFILTER, false, rtnl_held, extack);
2404 tfilter_put(tp, fh);
2405 tcf_proto_count_usesw(tp, true);
2406 /* q pointer is NULL for shared blocks */
2407 if (q)
2408 q->flags &= ~TCQ_F_CAN_BYPASS;
2409 }
2410
2411errout:
2412 if (err && tp_created)
2413 tcf_chain_tp_delete_empty(chain, tp, rtnl_held, NULL);
2414errout_tp:
2415 if (chain) {
2416 if (tp && !IS_ERR(tp))
2417 tcf_proto_put(tp, rtnl_held, NULL);
2418 if (!tp_created)
2419 tcf_chain_put(chain);
2420 }
2421 tcf_block_release(q, block, rtnl_held);
2422
2423 if (rtnl_held)
2424 rtnl_unlock();
2425
2426 if (err == -EAGAIN) {
2427 /* Take rtnl lock in case EAGAIN is caused by concurrent flush
2428 * of target chain.
2429 */
2430 rtnl_held = true;
2431 /* Replay the request. */
2432 goto replay;
2433 }
2434 return err;
2435
2436errout_locked:
2437 mutex_unlock(&chain->filter_chain_lock);
2438 goto errout;
2439}
2440
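/* Handle RTM_DELTFILTER requests. A priority of zero means "flush the
 * whole chain" (e.g. "tc filter del dev eth0 ingress" with iproute2), in
 * which case protocol, handle and kind must not be set.
 */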
2441static int tc_del_tfilter(struct sk_buff *skb, struct nlmsghdr *n,
2442 struct netlink_ext_ack *extack)
2443{
2444 struct net *net = sock_net(skb->sk);
2445 struct nlattr *tca[TCA_MAX + 1];
2446 char name[IFNAMSIZ];
2447 struct tcmsg *t;
2448 u32 protocol;
2449 u32 prio;
2450 u32 parent;
2451 u32 chain_index;
2452 struct Qdisc *q = NULL;
2453 struct tcf_chain_info chain_info;
2454 struct tcf_chain *chain = NULL;
2455 struct tcf_block *block = NULL;
2456 struct tcf_proto *tp = NULL;
2457 unsigned long cl = 0;
2458 void *fh = NULL;
2459 int err;
2460 bool rtnl_held = false;
2461
2462 err = nlmsg_parse_deprecated(n, sizeof(*t), tca, TCA_MAX,
2463 rtm_tca_policy, extack);
2464 if (err < 0)
2465 return err;
2466
2467 t = nlmsg_data(n);
2468 protocol = TC_H_MIN(t->tcm_info);
2469 prio = TC_H_MAJ(t->tcm_info);
2470 parent = t->tcm_parent;
2471
2472 if (prio == 0 && (protocol || t->tcm_handle || tca[TCA_KIND])) {
2473 NL_SET_ERR_MSG(extack, "Cannot flush filters with protocol, handle or kind set");
2474 return -ENOENT;
2475 }
2476
2477 /* Find head of filter chain. */
2478
2479 err = __tcf_qdisc_find(net, &q, &parent, t->tcm_ifindex, false, extack);
2480 if (err)
2481 return err;
2482
2483 if (tcf_proto_check_kind(tca[TCA_KIND], name)) {
2484 NL_SET_ERR_MSG(extack, "Specified TC filter name too long");
2485 err = -EINVAL;
2486 goto errout;
2487 }
2488	/* Take the rtnl mutex if we are flushing the whole chain, the block is
2489	 * shared (no qdisc was found), the qdisc is not unlocked, the classifier
2490	 * type is not specified, or the classifier is not unlocked.
2491	 */
2492 if (!prio ||
2493 (q && !(q->ops->cl_ops->flags & QDISC_CLASS_OPS_DOIT_UNLOCKED)) ||
2494 !tcf_proto_is_unlocked(name)) {
2495 rtnl_held = true;
2496 rtnl_lock();
2497 }
2498
2499 err = __tcf_qdisc_cl_find(q, parent, &cl, t->tcm_ifindex, extack);
2500 if (err)
2501 goto errout;
2502
2503 block = __tcf_block_find(net, q, cl, t->tcm_ifindex, t->tcm_block_index,
2504 extack);
2505 if (IS_ERR(block)) {
2506 err = PTR_ERR(block);
2507 goto errout;
2508 }
2509
2510 chain_index = nla_get_u32_default(tca[TCA_CHAIN], 0);
2511 if (chain_index > TC_ACT_EXT_VAL_MASK) {
2512 NL_SET_ERR_MSG(extack, "Specified chain index exceeds upper limit");
2513 err = -EINVAL;
2514 goto errout;
2515 }
2516 chain = tcf_chain_get(block, chain_index, false);
2517 if (!chain) {
2518 /* User requested flush on non-existent chain. Nothing to do,
2519 * so just return success.
2520 */
2521 if (prio == 0) {
2522 err = 0;
2523 goto errout;
2524 }
2525 NL_SET_ERR_MSG(extack, "Cannot find specified filter chain");
2526 err = -ENOENT;
2527 goto errout;
2528 }
2529
2530 if (prio == 0) {
2531 tfilter_notify_chain(net, skb, block, q, parent, n,
2532 chain, RTM_DELTFILTER, extack);
2533 tcf_chain_flush(chain, rtnl_held);
2534 err = 0;
2535 goto errout;
2536 }
2537
2538 mutex_lock(&chain->filter_chain_lock);
2539 tp = tcf_chain_tp_find(chain, &chain_info, protocol,
2540 prio, false, extack);
2541 if (!tp) {
2542 err = -ENOENT;
2543 NL_SET_ERR_MSG(extack, "Filter with specified priority/protocol not found");
2544 goto errout_locked;
2545 } else if (IS_ERR(tp)) {
2546 err = PTR_ERR(tp);
2547 goto errout_locked;
2548 } else if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], tp->ops->kind)) {
2549 NL_SET_ERR_MSG(extack, "Specified filter kind does not match existing one");
2550 err = -EINVAL;
2551 goto errout_locked;
2552 } else if (t->tcm_handle == 0) {
2553 tcf_proto_signal_destroying(chain, tp);
2554 tcf_chain_tp_remove(chain, &chain_info, tp);
2555 mutex_unlock(&chain->filter_chain_lock);
2556
2557 tcf_proto_put(tp, rtnl_held, NULL);
2558 tfilter_notify(net, skb, n, tp, block, q, parent, fh,
2559 RTM_DELTFILTER, false, rtnl_held, extack);
2560 err = 0;
2561 goto errout;
2562 }
2563 mutex_unlock(&chain->filter_chain_lock);
2564
2565 fh = tp->ops->get(tp, t->tcm_handle);
2566
2567 if (!fh) {
2568 NL_SET_ERR_MSG(extack, "Specified filter handle not found");
2569 err = -ENOENT;
2570 } else {
2571 bool last;
2572
2573 err = tfilter_del_notify(net, skb, n, tp, block, q, parent, fh,
2574 &last, rtnl_held, extack);
2575
2576 if (err)
2577 goto errout;
2578 if (last)
2579 tcf_chain_tp_delete_empty(chain, tp, rtnl_held, extack);
2580 }
2581
2582errout:
2583 if (chain) {
2584 if (tp && !IS_ERR(tp))
2585 tcf_proto_put(tp, rtnl_held, NULL);
2586 tcf_chain_put(chain);
2587 }
2588 tcf_block_release(q, block, rtnl_held);
2589
2590 if (rtnl_held)
2591 rtnl_unlock();
2592
2593 return err;
2594
2595errout_locked:
2596 mutex_unlock(&chain->filter_chain_lock);
2597 goto errout;
2598}
2599
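/* Handle RTM_GETTFILTER doit requests: look up a single filter and
 * unicast it back to the requester. Dumps are served separately by
 * tc_dump_tfilter().
 */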
2600static int tc_get_tfilter(struct sk_buff *skb, struct nlmsghdr *n,
2601 struct netlink_ext_ack *extack)
2602{
2603 struct net *net = sock_net(skb->sk);
2604 struct nlattr *tca[TCA_MAX + 1];
2605 char name[IFNAMSIZ];
2606 struct tcmsg *t;
2607 u32 protocol;
2608 u32 prio;
2609 u32 parent;
2610 u32 chain_index;
2611 struct Qdisc *q = NULL;
2612 struct tcf_chain_info chain_info;
2613 struct tcf_chain *chain = NULL;
2614 struct tcf_block *block = NULL;
2615 struct tcf_proto *tp = NULL;
2616 unsigned long cl = 0;
2617 void *fh = NULL;
2618 int err;
2619 bool rtnl_held = false;
2620
2621 err = nlmsg_parse_deprecated(n, sizeof(*t), tca, TCA_MAX,
2622 rtm_tca_policy, extack);
2623 if (err < 0)
2624 return err;
2625
2626 t = nlmsg_data(n);
2627 protocol = TC_H_MIN(t->tcm_info);
2628 prio = TC_H_MAJ(t->tcm_info);
2629 parent = t->tcm_parent;
2630
2631 if (prio == 0) {
2632 NL_SET_ERR_MSG(extack, "Invalid filter command with priority of zero");
2633 return -ENOENT;
2634 }
2635
2636 /* Find head of filter chain. */
2637
2638 err = __tcf_qdisc_find(net, &q, &parent, t->tcm_ifindex, false, extack);
2639 if (err)
2640 return err;
2641
2642 if (tcf_proto_check_kind(tca[TCA_KIND], name)) {
2643 NL_SET_ERR_MSG(extack, "Specified TC filter name too long");
2644 err = -EINVAL;
2645 goto errout;
2646 }
2647	/* Take the rtnl mutex if the block is shared (no qdisc was found), the
2648	 * qdisc is not unlocked, the classifier type is not specified, or the
2649	 * classifier is not unlocked.
2650	 */
2651 if ((q && !(q->ops->cl_ops->flags & QDISC_CLASS_OPS_DOIT_UNLOCKED)) ||
2652 !tcf_proto_is_unlocked(name)) {
2653 rtnl_held = true;
2654 rtnl_lock();
2655 }
2656
2657 err = __tcf_qdisc_cl_find(q, parent, &cl, t->tcm_ifindex, extack);
2658 if (err)
2659 goto errout;
2660
2661 block = __tcf_block_find(net, q, cl, t->tcm_ifindex, t->tcm_block_index,
2662 extack);
2663 if (IS_ERR(block)) {
2664 err = PTR_ERR(block);
2665 goto errout;
2666 }
2667
2668 chain_index = nla_get_u32_default(tca[TCA_CHAIN], 0);
2669 if (chain_index > TC_ACT_EXT_VAL_MASK) {
2670 NL_SET_ERR_MSG(extack, "Specified chain index exceeds upper limit");
2671 err = -EINVAL;
2672 goto errout;
2673 }
2674 chain = tcf_chain_get(block, chain_index, false);
2675 if (!chain) {
2676 NL_SET_ERR_MSG(extack, "Cannot find specified filter chain");
2677 err = -EINVAL;
2678 goto errout;
2679 }
2680
2681 mutex_lock(&chain->filter_chain_lock);
2682 tp = tcf_chain_tp_find(chain, &chain_info, protocol,
2683 prio, false, extack);
2684 mutex_unlock(&chain->filter_chain_lock);
2685 if (!tp) {
2686 err = -ENOENT;
2687 NL_SET_ERR_MSG(extack, "Filter with specified priority/protocol not found");
2688 goto errout;
2689 } else if (IS_ERR(tp)) {
2690 err = PTR_ERR(tp);
2691 goto errout;
2692 } else if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], tp->ops->kind)) {
2693 NL_SET_ERR_MSG(extack, "Specified filter kind does not match existing one");
2694 err = -EINVAL;
2695 goto errout;
2696 }
2697
2698 fh = tp->ops->get(tp, t->tcm_handle);
2699
2700 if (!fh) {
2701 NL_SET_ERR_MSG(extack, "Specified filter handle not found");
2702 err = -ENOENT;
2703 } else {
2704 err = tfilter_notify(net, skb, n, tp, block, q, parent,
2705 fh, RTM_NEWTFILTER, true, rtnl_held, NULL);
2706 if (err < 0)
2707 NL_SET_ERR_MSG(extack, "Failed to send filter notify message");
2708 }
2709
2710 tfilter_put(tp, fh);
2711errout:
2712 if (chain) {
2713 if (tp && !IS_ERR(tp))
2714 tcf_proto_put(tp, rtnl_held, NULL);
2715 tcf_chain_put(chain);
2716 }
2717 tcf_block_release(q, block, rtnl_held);
2718
2719 if (rtnl_held)
2720 rtnl_unlock();
2721
2722 return err;
2723}
2724
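/* Bookkeeping for resumable dumps: cb->args[0] counts chain/proto
 * positions that were already fully dumped, cb->args[1] stays zero until
 * the current proto's own header is dumped and then holds the number of
 * its filters dumped so far plus one, and cb->args[2] carries the
 * classifier walker cookie used to resume a partial walk.
 */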
2725struct tcf_dump_args {
2726 struct tcf_walker w;
2727 struct sk_buff *skb;
2728 struct netlink_callback *cb;
2729 struct tcf_block *block;
2730 struct Qdisc *q;
2731 u32 parent;
2732 bool terse_dump;
2733};
2734
2735static int tcf_node_dump(struct tcf_proto *tp, void *n, struct tcf_walker *arg)
2736{
2737 struct tcf_dump_args *a = (void *)arg;
2738 struct net *net = sock_net(a->skb->sk);
2739
2740 return tcf_fill_node(net, a->skb, tp, a->block, a->q, a->parent,
2741 n, NETLINK_CB(a->cb->skb).portid,
2742 a->cb->nlh->nlmsg_seq, NLM_F_MULTI,
2743 RTM_NEWTFILTER, a->terse_dump, true, NULL);
2744}
2745
2746static bool tcf_chain_dump(struct tcf_chain *chain, struct Qdisc *q, u32 parent,
2747 struct sk_buff *skb, struct netlink_callback *cb,
2748 long index_start, long *p_index, bool terse)
2749{
2750 struct net *net = sock_net(skb->sk);
2751 struct tcf_block *block = chain->block;
2752 struct tcmsg *tcm = nlmsg_data(cb->nlh);
2753 struct tcf_proto *tp, *tp_prev;
2754 struct tcf_dump_args arg;
2755
2756 for (tp = __tcf_get_next_proto(chain, NULL);
2757 tp;
2758 tp_prev = tp,
2759 tp = __tcf_get_next_proto(chain, tp),
2760 tcf_proto_put(tp_prev, true, NULL),
2761 (*p_index)++) {
2762 if (*p_index < index_start)
2763 continue;
2764 if (TC_H_MAJ(tcm->tcm_info) &&
2765 TC_H_MAJ(tcm->tcm_info) != tp->prio)
2766 continue;
2767 if (TC_H_MIN(tcm->tcm_info) &&
2768 TC_H_MIN(tcm->tcm_info) != tp->protocol)
2769 continue;
2770 if (*p_index > index_start)
2771 memset(&cb->args[1], 0,
2772 sizeof(cb->args) - sizeof(cb->args[0]));
2773 if (cb->args[1] == 0) {
2774 if (tcf_fill_node(net, skb, tp, block, q, parent, NULL,
2775 NETLINK_CB(cb->skb).portid,
2776 cb->nlh->nlmsg_seq, NLM_F_MULTI,
2777 RTM_NEWTFILTER, false, true, NULL) <= 0)
2778 goto errout;
2779 cb->args[1] = 1;
2780 }
2781 if (!tp->ops->walk)
2782 continue;
2783 arg.w.fn = tcf_node_dump;
2784 arg.skb = skb;
2785 arg.cb = cb;
2786 arg.block = block;
2787 arg.q = q;
2788 arg.parent = parent;
2789 arg.w.stop = 0;
2790 arg.w.skip = cb->args[1] - 1;
2791 arg.w.count = 0;
2792 arg.w.cookie = cb->args[2];
2793 arg.terse_dump = terse;
2794 tp->ops->walk(tp, &arg.w, true);
2795 cb->args[2] = arg.w.cookie;
2796 cb->args[1] = arg.w.count + 1;
2797 if (arg.w.stop)
2798 goto errout;
2799 }
2800 return true;
2801
2802errout:
2803 tcf_proto_put(tp, true, NULL);
2804 return false;
2805}
2806
2807static const struct nla_policy tcf_tfilter_dump_policy[TCA_MAX + 1] = {
2808 [TCA_CHAIN] = { .type = NLA_U32 },
2809 [TCA_DUMP_FLAGS] = NLA_POLICY_BITFIELD32(TCA_DUMP_FLAGS_TERSE),
2810};
2811
2812/* called with RTNL */
2813static int tc_dump_tfilter(struct sk_buff *skb, struct netlink_callback *cb)
2814{
2815 struct tcf_chain *chain, *chain_prev;
2816 struct net *net = sock_net(skb->sk);
2817 struct nlattr *tca[TCA_MAX + 1];
2818 struct Qdisc *q = NULL;
2819 struct tcf_block *block;
2820 struct tcmsg *tcm = nlmsg_data(cb->nlh);
2821 bool terse_dump = false;
2822 long index_start;
2823 long index;
2824 u32 parent;
2825 int err;
2826
2827 if (nlmsg_len(cb->nlh) < sizeof(*tcm))
2828 return skb->len;
2829
2830 err = nlmsg_parse_deprecated(cb->nlh, sizeof(*tcm), tca, TCA_MAX,
2831 tcf_tfilter_dump_policy, cb->extack);
2832 if (err)
2833 return err;
2834
2835 if (tca[TCA_DUMP_FLAGS]) {
2836 struct nla_bitfield32 flags =
2837 nla_get_bitfield32(tca[TCA_DUMP_FLAGS]);
2838
2839 terse_dump = flags.value & TCA_DUMP_FLAGS_TERSE;
2840 }
2841
2842 if (tcm->tcm_ifindex == TCM_IFINDEX_MAGIC_BLOCK) {
2843 block = tcf_block_refcnt_get(net, tcm->tcm_block_index);
2844 if (!block)
2845 goto out;
2846		/* If we work with a block index, q is NULL and the parent value
2847		 * is never used in the following code. The check in
2848		 * tcf_fill_node() prevents it. However, the compiler does not
2849		 * see that far, so set parent to zero to silence the warning
2850		 * about parent being used uninitialized.
2851		 */
2852 parent = 0;
2853 } else {
2854 const struct Qdisc_class_ops *cops;
2855 struct net_device *dev;
2856 unsigned long cl = 0;
2857
2858 dev = __dev_get_by_index(net, tcm->tcm_ifindex);
2859 if (!dev)
2860 return skb->len;
2861
2862 parent = tcm->tcm_parent;
2863 if (!parent)
2864 q = rtnl_dereference(dev->qdisc);
2865 else
2866 q = qdisc_lookup(dev, TC_H_MAJ(tcm->tcm_parent));
2867 if (!q)
2868 goto out;
2869 cops = q->ops->cl_ops;
2870 if (!cops)
2871 goto out;
2872 if (!cops->tcf_block)
2873 goto out;
2874 if (TC_H_MIN(tcm->tcm_parent)) {
2875 cl = cops->find(q, tcm->tcm_parent);
2876 if (cl == 0)
2877 goto out;
2878 }
2879 block = cops->tcf_block(q, cl, NULL);
2880 if (!block)
2881 goto out;
2882 parent = block->classid;
2883 if (tcf_block_shared(block))
2884 q = NULL;
2885 }
2886
2887 index_start = cb->args[0];
2888 index = 0;
2889
2890 for (chain = __tcf_get_next_chain(block, NULL);
2891 chain;
2892 chain_prev = chain,
2893 chain = __tcf_get_next_chain(block, chain),
2894 tcf_chain_put(chain_prev)) {
2895 if (tca[TCA_CHAIN] &&
2896 nla_get_u32(tca[TCA_CHAIN]) != chain->index)
2897 continue;
2898 if (!tcf_chain_dump(chain, q, parent, skb, cb,
2899 index_start, &index, terse_dump)) {
2900 tcf_chain_put(chain);
2901 err = -EMSGSIZE;
2902 break;
2903 }
2904 }
2905
2906 if (tcm->tcm_ifindex == TCM_IFINDEX_MAGIC_BLOCK)
2907 tcf_block_refcnt_put(block, true);
2908 cb->args[0] = index;
2909
2910out:
2911	/* If we made no progress, the error (EMSGSIZE) is real */
2912 if (skb->len == 0 && err)
2913 return err;
2914 return skb->len;
2915}
2916
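/* Fill a netlink message describing a chain, including its template if
 * one is set. Returns the new message length on success or -EMSGSIZE
 * after trimming the partially built message.
 */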
2917static int tc_chain_fill_node(const struct tcf_proto_ops *tmplt_ops,
2918 void *tmplt_priv, u32 chain_index,
2919 struct net *net, struct sk_buff *skb,
2920 struct tcf_block *block,
2921 u32 portid, u32 seq, u16 flags, int event,
2922 struct netlink_ext_ack *extack)
2923{
2924 unsigned char *b = skb_tail_pointer(skb);
2925 const struct tcf_proto_ops *ops;
2926 struct nlmsghdr *nlh;
2927 struct tcmsg *tcm;
2928 void *priv;
2929
2930 ops = tmplt_ops;
2931 priv = tmplt_priv;
2932
2933 nlh = nlmsg_put(skb, portid, seq, event, sizeof(*tcm), flags);
2934 if (!nlh)
2935 goto out_nlmsg_trim;
2936 tcm = nlmsg_data(nlh);
2937 tcm->tcm_family = AF_UNSPEC;
2938 tcm->tcm__pad1 = 0;
2939 tcm->tcm__pad2 = 0;
2940 tcm->tcm_handle = 0;
2941 if (block->q) {
2942 tcm->tcm_ifindex = qdisc_dev(block->q)->ifindex;
2943 tcm->tcm_parent = block->q->handle;
2944 } else {
2945 tcm->tcm_ifindex = TCM_IFINDEX_MAGIC_BLOCK;
2946 tcm->tcm_block_index = block->index;
2947 }
2948
2949 if (nla_put_u32(skb, TCA_CHAIN, chain_index))
2950 goto nla_put_failure;
2951
2952 if (ops) {
2953 if (nla_put_string(skb, TCA_KIND, ops->kind))
2954 goto nla_put_failure;
2955 if (ops->tmplt_dump(skb, net, priv) < 0)
2956 goto nla_put_failure;
2957 }
2958
2959 if (extack && extack->_msg &&
2960 nla_put_string(skb, TCA_EXT_WARN_MSG, extack->_msg))
2961 goto out_nlmsg_trim;
2962
2963 nlh->nlmsg_len = skb_tail_pointer(skb) - b;
2964
2965 return skb->len;
2966
2967out_nlmsg_trim:
2968nla_put_failure:
2969 nlmsg_trim(skb, b);
2970 return -EMSGSIZE;
2971}
2972
2973static int tc_chain_notify(struct tcf_chain *chain, struct sk_buff *oskb,
2974 u32 seq, u16 flags, int event, bool unicast,
2975 struct netlink_ext_ack *extack)
2976{
2977 u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
2978 struct tcf_block *block = chain->block;
2979 struct net *net = block->net;
2980 struct sk_buff *skb;
2981 int err = 0;
2982
2983 if (!unicast && !rtnl_notify_needed(net, flags, RTNLGRP_TC))
2984 return 0;
2985
2986 skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
2987 if (!skb)
2988 return -ENOBUFS;
2989
2990 if (tc_chain_fill_node(chain->tmplt_ops, chain->tmplt_priv,
2991 chain->index, net, skb, block, portid,
2992 seq, flags, event, extack) <= 0) {
2993 kfree_skb(skb);
2994 return -EINVAL;
2995 }
2996
2997 if (unicast)
2998 err = rtnl_unicast(skb, net, portid);
2999 else
3000 err = rtnetlink_send(skb, net, portid, RTNLGRP_TC,
3001 flags & NLM_F_ECHO);
3002
3003 return err;
3004}
3005
3006static int tc_chain_notify_delete(const struct tcf_proto_ops *tmplt_ops,
3007 void *tmplt_priv, u32 chain_index,
3008 struct tcf_block *block, struct sk_buff *oskb,
3009 u32 seq, u16 flags)
3010{
3011 u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
3012 struct net *net = block->net;
3013 struct sk_buff *skb;
3014
3015 if (!rtnl_notify_needed(net, flags, RTNLGRP_TC))
3016 return 0;
3017
3018 skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
3019 if (!skb)
3020 return -ENOBUFS;
3021
3022 if (tc_chain_fill_node(tmplt_ops, tmplt_priv, chain_index, net, skb,
3023 block, portid, seq, flags, RTM_DELCHAIN, NULL) <= 0) {
3024 kfree_skb(skb);
3025 return -EINVAL;
3026 }
3027
3028 return rtnetlink_send(skb, net, portid, RTNLGRP_TC, flags & NLM_F_ECHO);
3029}
3030
3031static int tc_chain_tmplt_add(struct tcf_chain *chain, struct net *net,
3032 struct nlattr **tca,
3033 struct netlink_ext_ack *extack)
3034{
3035 const struct tcf_proto_ops *ops;
3036 char name[IFNAMSIZ];
3037 void *tmplt_priv;
3038
3039	/* If kind is not set, the user did not specify a template. */
3040 if (!tca[TCA_KIND])
3041 return 0;
3042
3043 if (tcf_proto_check_kind(tca[TCA_KIND], name)) {
3044 NL_SET_ERR_MSG(extack, "Specified TC chain template name too long");
3045 return -EINVAL;
3046 }
3047
3048 ops = tcf_proto_lookup_ops(name, true, extack);
3049 if (IS_ERR(ops))
3050 return PTR_ERR(ops);
3051 if (!ops->tmplt_create || !ops->tmplt_destroy || !ops->tmplt_dump ||
3052 !ops->tmplt_reoffload) {
3053 NL_SET_ERR_MSG(extack, "Chain templates are not supported with specified classifier");
3054 module_put(ops->owner);
3055 return -EOPNOTSUPP;
3056 }
3057
3058 tmplt_priv = ops->tmplt_create(net, chain, tca, extack);
3059 if (IS_ERR(tmplt_priv)) {
3060 module_put(ops->owner);
3061 return PTR_ERR(tmplt_priv);
3062 }
3063 chain->tmplt_ops = ops;
3064 chain->tmplt_priv = tmplt_priv;
3065 return 0;
3066}
3067
3068static void tc_chain_tmplt_del(const struct tcf_proto_ops *tmplt_ops,
3069 void *tmplt_priv)
3070{
3071	/* If template ops are not set, there is nothing to destroy. */
3072 if (!tmplt_ops)
3073 return;
3074
3075 tmplt_ops->tmplt_destroy(tmplt_priv);
3076 module_put(tmplt_ops->owner);
3077}
3078
3079/* Add/delete/get a chain */
3080
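/* For illustration, with iproute2 a chain with a flower template is
 * created by something like:
 *
 *	tc chain add dev eth0 ingress protocol ip chain 1 \
 *		flower dst_ip 192.0.2.0/24
 *
 * The template constrains which keys later filters on that chain may
 * use, so that drivers can prepare matching hardware resources in
 * advance.
 */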
3081static int tc_ctl_chain(struct sk_buff *skb, struct nlmsghdr *n,
3082 struct netlink_ext_ack *extack)
3083{
3084 struct net *net = sock_net(skb->sk);
3085 struct nlattr *tca[TCA_MAX + 1];
3086 struct tcmsg *t;
3087 u32 parent;
3088 u32 chain_index;
3089 struct Qdisc *q;
3090 struct tcf_chain *chain;
3091 struct tcf_block *block;
3092 unsigned long cl;
3093 int err;
3094
3095replay:
3096 q = NULL;
3097 err = nlmsg_parse_deprecated(n, sizeof(*t), tca, TCA_MAX,
3098 rtm_tca_policy, extack);
3099 if (err < 0)
3100 return err;
3101
3102 t = nlmsg_data(n);
3103 parent = t->tcm_parent;
3104 cl = 0;
3105
3106 block = tcf_block_find(net, &q, &parent, &cl,
3107 t->tcm_ifindex, t->tcm_block_index, extack);
3108 if (IS_ERR(block))
3109 return PTR_ERR(block);
3110
3111 chain_index = nla_get_u32_default(tca[TCA_CHAIN], 0);
3112 if (chain_index > TC_ACT_EXT_VAL_MASK) {
3113 NL_SET_ERR_MSG(extack, "Specified chain index exceeds upper limit");
3114 err = -EINVAL;
3115 goto errout_block;
3116 }
3117
3118 mutex_lock(&block->lock);
3119 chain = tcf_chain_lookup(block, chain_index);
3120 if (n->nlmsg_type == RTM_NEWCHAIN) {
3121 if (chain) {
3122 if (tcf_chain_held_by_acts_only(chain)) {
3123 /* The chain exists only because there is
3124 * some action referencing it.
3125 */
3126 tcf_chain_hold(chain);
3127 } else {
3128 NL_SET_ERR_MSG(extack, "Filter chain already exists");
3129 err = -EEXIST;
3130 goto errout_block_locked;
3131 }
3132 } else {
3133 if (!(n->nlmsg_flags & NLM_F_CREATE)) {
3134 NL_SET_ERR_MSG(extack, "Need both RTM_NEWCHAIN and NLM_F_CREATE to create a new chain");
3135 err = -ENOENT;
3136 goto errout_block_locked;
3137 }
3138 chain = tcf_chain_create(block, chain_index);
3139 if (!chain) {
3140 NL_SET_ERR_MSG(extack, "Failed to create filter chain");
3141 err = -ENOMEM;
3142 goto errout_block_locked;
3143 }
3144 }
3145 } else {
3146 if (!chain || tcf_chain_held_by_acts_only(chain)) {
3147 NL_SET_ERR_MSG(extack, "Cannot find specified filter chain");
3148 err = -EINVAL;
3149 goto errout_block_locked;
3150 }
3151 tcf_chain_hold(chain);
3152 }
3153
3154 if (n->nlmsg_type == RTM_NEWCHAIN) {
3155		/* Modifying the chain requires holding the parent block lock.
3156		 * In case the chain was successfully added, take a reference
3157		 * to it so that an empty chain does not disappear before the
3158		 * end of this function.
3159		 */
3160 tcf_chain_hold(chain);
3161 chain->explicitly_created = true;
3162 }
3163 mutex_unlock(&block->lock);
3164
3165 switch (n->nlmsg_type) {
3166 case RTM_NEWCHAIN:
3167 err = tc_chain_tmplt_add(chain, net, tca, extack);
3168 if (err) {
3169 tcf_chain_put_explicitly_created(chain);
3170 goto errout;
3171 }
3172
3173 tc_chain_notify(chain, NULL, 0, NLM_F_CREATE | NLM_F_EXCL,
3174 RTM_NEWCHAIN, false, extack);
3175 break;
3176 case RTM_DELCHAIN:
3177 tfilter_notify_chain(net, skb, block, q, parent, n,
3178 chain, RTM_DELTFILTER, extack);
3179 /* Flush the chain first as the user requested chain removal. */
3180 tcf_chain_flush(chain, true);
3181 /* In case the chain was successfully deleted, put a reference
3182 * to the chain previously taken during addition.
3183 */
3184 tcf_chain_put_explicitly_created(chain);
3185 break;
3186 case RTM_GETCHAIN:
3187 err = tc_chain_notify(chain, skb, n->nlmsg_seq,
3188 n->nlmsg_flags, n->nlmsg_type, true, extack);
3189 if (err < 0)
3190 NL_SET_ERR_MSG(extack, "Failed to send chain notify message");
3191 break;
3192 default:
3193 err = -EOPNOTSUPP;
3194 NL_SET_ERR_MSG(extack, "Unsupported message type");
3195 goto errout;
3196 }
3197
3198errout:
3199 tcf_chain_put(chain);
3200errout_block:
3201 tcf_block_release(q, block, true);
3202 if (err == -EAGAIN)
3203 /* Replay the request. */
3204 goto replay;
3205 return err;
3206
3207errout_block_locked:
3208 mutex_unlock(&block->lock);
3209 goto errout_block;
3210}
3211
3212/* called with RTNL */
3213static int tc_dump_chain(struct sk_buff *skb, struct netlink_callback *cb)
3214{
3215 struct net *net = sock_net(skb->sk);
3216 struct nlattr *tca[TCA_MAX + 1];
3217 struct Qdisc *q = NULL;
3218 struct tcf_block *block;
3219 struct tcmsg *tcm = nlmsg_data(cb->nlh);
3220 struct tcf_chain *chain;
3221 long index_start;
3222 long index;
3223 int err;
3224
3225 if (nlmsg_len(cb->nlh) < sizeof(*tcm))
3226 return skb->len;
3227
3228 err = nlmsg_parse_deprecated(cb->nlh, sizeof(*tcm), tca, TCA_MAX,
3229 rtm_tca_policy, cb->extack);
3230 if (err)
3231 return err;
3232
3233 if (tcm->tcm_ifindex == TCM_IFINDEX_MAGIC_BLOCK) {
3234 block = tcf_block_refcnt_get(net, tcm->tcm_block_index);
3235 if (!block)
3236 goto out;
3237 } else {
3238 const struct Qdisc_class_ops *cops;
3239 struct net_device *dev;
3240 unsigned long cl = 0;
3241
3242 dev = __dev_get_by_index(net, tcm->tcm_ifindex);
3243 if (!dev)
3244 return skb->len;
3245
3246 if (!tcm->tcm_parent)
3247 q = rtnl_dereference(dev->qdisc);
3248 else
3249 q = qdisc_lookup(dev, TC_H_MAJ(tcm->tcm_parent));
3250
3251 if (!q)
3252 goto out;
3253 cops = q->ops->cl_ops;
3254 if (!cops)
3255 goto out;
3256 if (!cops->tcf_block)
3257 goto out;
3258 if (TC_H_MIN(tcm->tcm_parent)) {
3259 cl = cops->find(q, tcm->tcm_parent);
3260 if (cl == 0)
3261 goto out;
3262 }
3263 block = cops->tcf_block(q, cl, NULL);
3264 if (!block)
3265 goto out;
3266 if (tcf_block_shared(block))
3267 q = NULL;
3268 }
3269
3270 index_start = cb->args[0];
3271 index = 0;
3272
3273 mutex_lock(&block->lock);
3274 list_for_each_entry(chain, &block->chain_list, list) {
3275 if ((tca[TCA_CHAIN] &&
3276 nla_get_u32(tca[TCA_CHAIN]) != chain->index))
3277 continue;
3278 if (index < index_start) {
3279 index++;
3280 continue;
3281 }
3282 if (tcf_chain_held_by_acts_only(chain))
3283 continue;
3284 err = tc_chain_fill_node(chain->tmplt_ops, chain->tmplt_priv,
3285 chain->index, net, skb, block,
3286 NETLINK_CB(cb->skb).portid,
3287 cb->nlh->nlmsg_seq, NLM_F_MULTI,
3288 RTM_NEWCHAIN, NULL);
3289 if (err <= 0)
3290 break;
3291 index++;
3292 }
3293 mutex_unlock(&block->lock);
3294
3295 if (tcm->tcm_ifindex == TCM_IFINDEX_MAGIC_BLOCK)
3296 tcf_block_refcnt_put(block, true);
3297 cb->args[0] = index;
3298
3299out:
3300	/* If we made no progress, the error (EMSGSIZE) is real */
3301 if (skb->len == 0 && err)
3302 return err;
3303 return skb->len;
3304}
3305
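/* Initialize a tcf_exts instance. Classifiers typically call this through
 * the tcf_exts_init() wrapper from pkt_cls.h, e.g. (illustrative, after
 * cls_bpf):
 *
 *	err = tcf_exts_init(&prog->exts, net, TCA_BPF_ACT, TCA_BPF_POLICE);
 *
 * When use_action_miss is true, a miss cookie base is also allocated so
 * that hardware miss reports can be mapped back to this exts instance.
 */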
3306int tcf_exts_init_ex(struct tcf_exts *exts, struct net *net, int action,
3307 int police, struct tcf_proto *tp, u32 handle,
3308 bool use_action_miss)
3309{
3310 int err = 0;
3311
3312#ifdef CONFIG_NET_CLS_ACT
3313 exts->type = 0;
3314 exts->nr_actions = 0;
3315 exts->miss_cookie_node = NULL;
3316	/* Note: we do not yet own a reference on net.
3317 * This reference might be taken later from tcf_exts_get_net().
3318 */
3319 exts->net = net;
3320 exts->actions = kcalloc(TCA_ACT_MAX_PRIO, sizeof(struct tc_action *),
3321 GFP_KERNEL);
3322 if (!exts->actions)
3323 return -ENOMEM;
3324#endif
3325
3326 exts->action = action;
3327 exts->police = police;
3328
3329 if (!use_action_miss)
3330 return 0;
3331
3332 err = tcf_exts_miss_cookie_base_alloc(exts, tp, handle);
3333 if (err)
3334 goto err_miss_alloc;
3335
3336 return 0;
3337
3338err_miss_alloc:
3339 tcf_exts_destroy(exts);
3340#ifdef CONFIG_NET_CLS_ACT
3341 exts->actions = NULL;
3342#endif
3343 return err;
3344}
3345EXPORT_SYMBOL(tcf_exts_init_ex);
3346
3347void tcf_exts_destroy(struct tcf_exts *exts)
3348{
3349 tcf_exts_miss_cookie_base_destroy(exts);
3350
3351#ifdef CONFIG_NET_CLS_ACT
3352 if (exts->actions) {
3353 tcf_action_destroy(exts->actions, TCA_ACT_UNBIND);
3354 kfree(exts->actions);
3355 }
3356 exts->nr_actions = 0;
3357#endif
3358}
3359EXPORT_SYMBOL(tcf_exts_destroy);
3360
3361int tcf_exts_validate_ex(struct net *net, struct tcf_proto *tp, struct nlattr **tb,
3362 struct nlattr *rate_tlv, struct tcf_exts *exts,
3363 u32 flags, u32 fl_flags, struct netlink_ext_ack *extack)
3364{
3365#ifdef CONFIG_NET_CLS_ACT
3366 {
3367 int init_res[TCA_ACT_MAX_PRIO] = {};
3368 struct tc_action *act;
3369 size_t attr_size = 0;
3370
3371 if (exts->police && tb[exts->police]) {
3372 struct tc_action_ops *a_o;
3373
3374 flags |= TCA_ACT_FLAGS_POLICE | TCA_ACT_FLAGS_BIND;
3375 a_o = tc_action_load_ops(tb[exts->police], flags,
3376 extack);
3377 if (IS_ERR(a_o))
3378 return PTR_ERR(a_o);
3379 act = tcf_action_init_1(net, tp, tb[exts->police],
3380 rate_tlv, a_o, init_res, flags,
3381 extack);
3382 module_put(a_o->owner);
3383 if (IS_ERR(act))
3384 return PTR_ERR(act);
3385
3386 act->type = exts->type = TCA_OLD_COMPAT;
3387 exts->actions[0] = act;
3388 exts->nr_actions = 1;
3389 tcf_idr_insert_many(exts->actions, init_res);
3390 } else if (exts->action && tb[exts->action]) {
3391 int err;
3392
3393 flags |= TCA_ACT_FLAGS_BIND;
3394 err = tcf_action_init(net, tp, tb[exts->action],
3395 rate_tlv, exts->actions, init_res,
3396 &attr_size, flags, fl_flags,
3397 extack);
3398 if (err < 0)
3399 return err;
3400 exts->nr_actions = err;
3401 }
3402 }
3403#else
3404 if ((exts->action && tb[exts->action]) ||
3405 (exts->police && tb[exts->police])) {
3406 NL_SET_ERR_MSG(extack, "Classifier actions are not supported per compile options (CONFIG_NET_CLS_ACT)");
3407 return -EOPNOTSUPP;
3408 }
3409#endif
3410
3411 return 0;
3412}
3413EXPORT_SYMBOL(tcf_exts_validate_ex);
3414
3415int tcf_exts_validate(struct net *net, struct tcf_proto *tp, struct nlattr **tb,
3416 struct nlattr *rate_tlv, struct tcf_exts *exts,
3417 u32 flags, struct netlink_ext_ack *extack)
3418{
3419 return tcf_exts_validate_ex(net, tp, tb, rate_tlv, exts,
3420 flags, 0, extack);
3421}
3422EXPORT_SYMBOL(tcf_exts_validate);
3423
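/* Hand src's actions over to dst and destroy the actions dst held before
 * the swap (only meaningful with CONFIG_NET_CLS_ACT).
 */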
3424void tcf_exts_change(struct tcf_exts *dst, struct tcf_exts *src)
3425{
3426#ifdef CONFIG_NET_CLS_ACT
3427 struct tcf_exts old = *dst;
3428
3429 *dst = *src;
3430 tcf_exts_destroy(&old);
3431#endif
3432}
3433EXPORT_SYMBOL(tcf_exts_change);
3434
3435#ifdef CONFIG_NET_CLS_ACT
3436static struct tc_action *tcf_exts_first_act(struct tcf_exts *exts)
3437{
3438 if (exts->nr_actions == 0)
3439 return NULL;
3440 else
3441 return exts->actions[0];
3442}
3443#endif
3444
3445int tcf_exts_dump(struct sk_buff *skb, struct tcf_exts *exts)
3446{
3447#ifdef CONFIG_NET_CLS_ACT
3448 struct nlattr *nest;
3449
3450 if (exts->action && tcf_exts_has_actions(exts)) {
3451		/*
3452		 * Again for backward-compatible mode: we want to work
3453		 * with both the old and the new mode of entering tc
3454		 * data, even if iproute2 is newer. - jhs
3455		 */
3456 if (exts->type != TCA_OLD_COMPAT) {
3457 nest = nla_nest_start_noflag(skb, exts->action);
3458 if (nest == NULL)
3459 goto nla_put_failure;
3460
3461 if (tcf_action_dump(skb, exts->actions, 0, 0, false)
3462 < 0)
3463 goto nla_put_failure;
3464 nla_nest_end(skb, nest);
3465 } else if (exts->police) {
3466 struct tc_action *act = tcf_exts_first_act(exts);
3467 nest = nla_nest_start_noflag(skb, exts->police);
3468 if (nest == NULL || !act)
3469 goto nla_put_failure;
3470 if (tcf_action_dump_old(skb, act, 0, 0) < 0)
3471 goto nla_put_failure;
3472 nla_nest_end(skb, nest);
3473 }
3474 }
3475 return 0;
3476
3477nla_put_failure:
3478 nla_nest_cancel(skb, nest);
3479 return -1;
3480#else
3481 return 0;
3482#endif
3483}
3484EXPORT_SYMBOL(tcf_exts_dump);
3485
3486int tcf_exts_terse_dump(struct sk_buff *skb, struct tcf_exts *exts)
3487{
3488#ifdef CONFIG_NET_CLS_ACT
3489 struct nlattr *nest;
3490
3491 if (!exts->action || !tcf_exts_has_actions(exts))
3492 return 0;
3493
3494 nest = nla_nest_start_noflag(skb, exts->action);
3495 if (!nest)
3496 goto nla_put_failure;
3497
3498 if (tcf_action_dump(skb, exts->actions, 0, 0, true) < 0)
3499 goto nla_put_failure;
3500 nla_nest_end(skb, nest);
3501 return 0;
3502
3503nla_put_failure:
3504 nla_nest_cancel(skb, nest);
3505 return -1;
3506#else
3507 return 0;
3508#endif
3509}
3510EXPORT_SYMBOL(tcf_exts_terse_dump);
3511
3512int tcf_exts_dump_stats(struct sk_buff *skb, struct tcf_exts *exts)
3513{
3514#ifdef CONFIG_NET_CLS_ACT
3515 struct tc_action *a = tcf_exts_first_act(exts);
3516 if (a != NULL && tcf_action_copy_stats(skb, a, 1) < 0)
3517 return -1;
3518#endif
3519 return 0;
3520}
3521EXPORT_SYMBOL(tcf_exts_dump_stats);
3522
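/* Offload accounting: the first hardware instance of a filter sets
 * TCA_CLS_FLAGS_IN_HW in its flags and bumps the per-block offloadcnt;
 * the flag and counter are dropped again when the last hardware instance
 * goes away.
 */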
3523static void tcf_block_offload_inc(struct tcf_block *block, u32 *flags)
3524{
3525 if (*flags & TCA_CLS_FLAGS_IN_HW)
3526 return;
3527 *flags |= TCA_CLS_FLAGS_IN_HW;
3528 atomic_inc(&block->offloadcnt);
3529}
3530
3531static void tcf_block_offload_dec(struct tcf_block *block, u32 *flags)
3532{
3533 if (!(*flags & TCA_CLS_FLAGS_IN_HW))
3534 return;
3535 *flags &= ~TCA_CLS_FLAGS_IN_HW;
3536 atomic_dec(&block->offloadcnt);
3537}
3538
3539static void tc_cls_offload_cnt_update(struct tcf_block *block,
3540 struct tcf_proto *tp, u32 *cnt,
3541 u32 *flags, u32 diff, bool add)
3542{
3543 lockdep_assert_held(&block->cb_lock);
3544
3545 spin_lock(&tp->lock);
3546 if (add) {
3547 if (!*cnt)
3548 tcf_block_offload_inc(block, flags);
3549 *cnt += diff;
3550 } else {
3551 *cnt -= diff;
3552 if (!*cnt)
3553 tcf_block_offload_dec(block, flags);
3554 }
3555 spin_unlock(&tp->lock);
3556}
3557
3558static void
3559tc_cls_offload_cnt_reset(struct tcf_block *block, struct tcf_proto *tp,
3560 u32 *cnt, u32 *flags)
3561{
3562 lockdep_assert_held(&block->cb_lock);
3563
3564 spin_lock(&tp->lock);
3565 tcf_block_offload_dec(block, flags);
3566 *cnt = 0;
3567 spin_unlock(&tp->lock);
3568}
3569
3570static int
3571__tc_setup_cb_call(struct tcf_block *block, enum tc_setup_type type,
3572 void *type_data, bool err_stop)
3573{
3574 struct flow_block_cb *block_cb;
3575 int ok_count = 0;
3576 int err;
3577
3578 list_for_each_entry(block_cb, &block->flow_block.cb_list, list) {
3579 err = block_cb->cb(type, type_data, block_cb->cb_priv);
3580 if (err) {
3581 if (err_stop)
3582 return err;
3583 } else {
3584 ok_count++;
3585 }
3586 }
3587 return ok_count;
3588}
3589
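/* Run the block's callbacks for an offload request and return the number
 * of successful callbacks (or the first error if err_stop is set). The
 * retry dance below exists because cb_lock nests inside rtnl at block
 * bind time: if the block has locked devices, cb_lock must be dropped and
 * rtnl taken first to preserve that lock order.
 */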
3590int tc_setup_cb_call(struct tcf_block *block, enum tc_setup_type type,
3591 void *type_data, bool err_stop, bool rtnl_held)
3592{
3593 bool take_rtnl = READ_ONCE(block->lockeddevcnt) && !rtnl_held;
3594 int ok_count;
3595
3596retry:
3597 if (take_rtnl)
3598 rtnl_lock();
3599 down_read(&block->cb_lock);
3600	/* We need to obtain the rtnl lock if the block is bound to devices that
3601	 * require it. In the block bind code cb_lock is taken while holding
3602	 * rtnl, so we must obtain the locks in the same order here.
3603	 */
3604 if (!rtnl_held && !take_rtnl && block->lockeddevcnt) {
3605 up_read(&block->cb_lock);
3606 take_rtnl = true;
3607 goto retry;
3608 }
3609
3610 ok_count = __tc_setup_cb_call(block, type, type_data, err_stop);
3611
3612 up_read(&block->cb_lock);
3613 if (take_rtnl)
3614 rtnl_unlock();
3615 return ok_count;
3616}
3617EXPORT_SYMBOL(tc_setup_cb_call);
3618
3619/* Non-destructive filter add. If a filter that wasn't already in hardware
3620 * is successfully offloaded, increment the block offloads counter. On
3621 * failure, the previously offloaded filter is considered to be intact and
3622 * the offloads counter is not decremented.
3623 */
3624
3625int tc_setup_cb_add(struct tcf_block *block, struct tcf_proto *tp,
3626 enum tc_setup_type type, void *type_data, bool err_stop,
3627 u32 *flags, unsigned int *in_hw_count, bool rtnl_held)
3628{
3629 bool take_rtnl = READ_ONCE(block->lockeddevcnt) && !rtnl_held;
3630 int ok_count;
3631
3632retry:
3633 if (take_rtnl)
3634 rtnl_lock();
3635 down_read(&block->cb_lock);
3636	/* We need to obtain the rtnl lock if the block is bound to devices that
3637	 * require it. In the block bind code cb_lock is taken while holding
3638	 * rtnl, so we must obtain the locks in the same order here.
3639	 */
3640 if (!rtnl_held && !take_rtnl && block->lockeddevcnt) {
3641 up_read(&block->cb_lock);
3642 take_rtnl = true;
3643 goto retry;
3644 }
3645
3646 /* Make sure all netdevs sharing this block are offload-capable. */
3647 if (block->nooffloaddevcnt && err_stop) {
3648 ok_count = -EOPNOTSUPP;
3649 goto err_unlock;
3650 }
3651
3652 ok_count = __tc_setup_cb_call(block, type, type_data, err_stop);
3653 if (ok_count < 0)
3654 goto err_unlock;
3655
3656 if (tp->ops->hw_add)
3657 tp->ops->hw_add(tp, type_data);
3658 if (ok_count > 0)
3659 tc_cls_offload_cnt_update(block, tp, in_hw_count, flags,
3660 ok_count, true);
3661err_unlock:
3662 up_read(&block->cb_lock);
3663 if (take_rtnl)
3664 rtnl_unlock();
3665 return min(ok_count, 0);
3666}
3667EXPORT_SYMBOL(tc_setup_cb_add);
3668
3669/* Destructive filter replace. If a filter that wasn't already in hardware
3670 * is successfully offloaded, increment the block offload counter. On
3671 * failure, the previously offloaded filter is considered to be destroyed
3672 * and the offload counter is decremented.
3673 */
3674
3675int tc_setup_cb_replace(struct tcf_block *block, struct tcf_proto *tp,
3676 enum tc_setup_type type, void *type_data, bool err_stop,
3677 u32 *old_flags, unsigned int *old_in_hw_count,
3678 u32 *new_flags, unsigned int *new_in_hw_count,
3679 bool rtnl_held)
3680{
3681 bool take_rtnl = READ_ONCE(block->lockeddevcnt) && !rtnl_held;
3682 int ok_count;
3683
3684retry:
3685 if (take_rtnl)
3686 rtnl_lock();
3687 down_read(&block->cb_lock);
3688	/* We need to obtain the rtnl lock if the block is bound to devices that
3689	 * require it. In the block bind code cb_lock is taken while holding
3690	 * rtnl, so we must obtain the locks in the same order here.
3691	 */
3692 if (!rtnl_held && !take_rtnl && block->lockeddevcnt) {
3693 up_read(&block->cb_lock);
3694 take_rtnl = true;
3695 goto retry;
3696 }
3697
3698 /* Make sure all netdevs sharing this block are offload-capable. */
3699 if (block->nooffloaddevcnt && err_stop) {
3700 ok_count = -EOPNOTSUPP;
3701 goto err_unlock;
3702 }
3703
3704 tc_cls_offload_cnt_reset(block, tp, old_in_hw_count, old_flags);
3705 if (tp->ops->hw_del)
3706 tp->ops->hw_del(tp, type_data);
3707
3708 ok_count = __tc_setup_cb_call(block, type, type_data, err_stop);
3709 if (ok_count < 0)
3710 goto err_unlock;
3711
3712 if (tp->ops->hw_add)
3713 tp->ops->hw_add(tp, type_data);
3714 if (ok_count > 0)
3715 tc_cls_offload_cnt_update(block, tp, new_in_hw_count,
3716 new_flags, ok_count, true);
3717err_unlock:
3718 up_read(&block->cb_lock);
3719 if (take_rtnl)
3720 rtnl_unlock();
3721 return min(ok_count, 0);
3722}
3723EXPORT_SYMBOL(tc_setup_cb_replace);
3724
3725/* Destroy the filter and decrement the block offload counter if the filter
3726 * was previously offloaded.
3727 */
3728
3729int tc_setup_cb_destroy(struct tcf_block *block, struct tcf_proto *tp,
3730 enum tc_setup_type type, void *type_data, bool err_stop,
3731 u32 *flags, unsigned int *in_hw_count, bool rtnl_held)
3732{
3733 bool take_rtnl = READ_ONCE(block->lockeddevcnt) && !rtnl_held;
3734 int ok_count;
3735
3736retry:
3737 if (take_rtnl)
3738 rtnl_lock();
3739 down_read(&block->cb_lock);
3740	/* We need to obtain the rtnl lock if the block is bound to devices that
3741	 * require it. In the block bind code cb_lock is taken while holding
3742	 * rtnl, so we must obtain the locks in the same order here.
3743	 */
3744 if (!rtnl_held && !take_rtnl && block->lockeddevcnt) {
3745 up_read(&block->cb_lock);
3746 take_rtnl = true;
3747 goto retry;
3748 }
3749
3750 ok_count = __tc_setup_cb_call(block, type, type_data, err_stop);
3751
3752 tc_cls_offload_cnt_reset(block, tp, in_hw_count, flags);
3753 if (tp->ops->hw_del)
3754 tp->ops->hw_del(tp, type_data);
3755
3756 up_read(&block->cb_lock);
3757 if (take_rtnl)
3758 rtnl_unlock();
3759 return min(ok_count, 0);
3760}
3761EXPORT_SYMBOL(tc_setup_cb_destroy);
3762
3763int tc_setup_cb_reoffload(struct tcf_block *block, struct tcf_proto *tp,
3764 bool add, flow_setup_cb_t *cb,
3765 enum tc_setup_type type, void *type_data,
3766 void *cb_priv, u32 *flags, unsigned int *in_hw_count)
3767{
3768 int err = cb(type, type_data, cb_priv);
3769
3770 if (err) {
3771 if (add && tc_skip_sw(*flags))
3772 return err;
3773 } else {
3774 tc_cls_offload_cnt_update(block, tp, in_hw_count, flags, 1,
3775 add);
3776 }
3777
3778 return 0;
3779}
3780EXPORT_SYMBOL(tc_setup_cb_reoffload);
3781
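/* Copy the action's user cookie (an opaque blob set by userspace via
 * TCA_ACT_COOKIE) into the flow_action entry under RCU so that drivers
 * can associate it with the offloaded entry.
 */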
3782static int tcf_act_get_user_cookie(struct flow_action_entry *entry,
3783 const struct tc_action *act)
3784{
3785 struct tc_cookie *user_cookie;
3786 int err = 0;
3787
3788 rcu_read_lock();
3789 user_cookie = rcu_dereference(act->user_cookie);
3790 if (user_cookie) {
3791 entry->user_cookie = flow_action_cookie_create(user_cookie->data,
3792 user_cookie->len,
3793 GFP_ATOMIC);
3794 if (!entry->user_cookie)
3795 err = -ENOMEM;
3796 }
3797 rcu_read_unlock();
3798 return err;
3799}
3800
3801static void tcf_act_put_user_cookie(struct flow_action_entry *entry)
3802{
3803 flow_action_cookie_destroy(entry->user_cookie);
3804}
3805
3806void tc_cleanup_offload_action(struct flow_action *flow_action)
3807{
3808 struct flow_action_entry *entry;
3809 int i;
3810
3811 flow_action_for_each(i, entry, flow_action) {
3812 tcf_act_put_user_cookie(entry);
3813 if (entry->destructor)
3814 entry->destructor(entry->destructor_priv);
3815 }
3816}
3817EXPORT_SYMBOL(tc_cleanup_offload_action);
3818
3819static int tc_setup_offload_act(struct tc_action *act,
3820 struct flow_action_entry *entry,
3821 u32 *index_inc,
3822 struct netlink_ext_ack *extack)
3823{
3824#ifdef CONFIG_NET_CLS_ACT
3825 if (act->ops->offload_act_setup) {
3826 return act->ops->offload_act_setup(act, entry, index_inc, true,
3827 extack);
3828 } else {
3829 NL_SET_ERR_MSG(extack, "Action does not support offload");
3830 return -EOPNOTSUPP;
3831 }
3832#else
3833 return 0;
3834#endif
3835}
3836
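/* Translate an array of tc actions into flow_action entries for offload.
 * A single tc action may expand into several entries (a pedit with
 * multiple keys, for instance), which the index/j bookkeeping below
 * accounts for.
 */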
3837int tc_setup_action(struct flow_action *flow_action,
3838 struct tc_action *actions[],
3839 u32 miss_cookie_base,
3840 struct netlink_ext_ack *extack)
3841{
3842 int i, j, k, index, err = 0;
3843 struct tc_action *act;
3844
3845 BUILD_BUG_ON(TCA_ACT_HW_STATS_ANY != FLOW_ACTION_HW_STATS_ANY);
3846 BUILD_BUG_ON(TCA_ACT_HW_STATS_IMMEDIATE != FLOW_ACTION_HW_STATS_IMMEDIATE);
3847 BUILD_BUG_ON(TCA_ACT_HW_STATS_DELAYED != FLOW_ACTION_HW_STATS_DELAYED);
3848
3849 if (!actions)
3850 return 0;
3851
3852 j = 0;
3853 tcf_act_for_each_action(i, act, actions) {
3854 struct flow_action_entry *entry;
3855
3856 entry = &flow_action->entries[j];
3857 spin_lock_bh(&act->tcfa_lock);
3858 err = tcf_act_get_user_cookie(entry, act);
3859 if (err)
3860 goto err_out_locked;
3861
3862 index = 0;
3863 err = tc_setup_offload_act(act, entry, &index, extack);
3864 if (err)
3865 goto err_out_locked;
3866
3867		for (k = 0; k < index; k++) {
3868 entry[k].hw_stats = tc_act_hw_stats(act->hw_stats);
3869 entry[k].hw_index = act->tcfa_index;
3870 entry[k].cookie = (unsigned long)act;
3871 entry[k].miss_cookie =
3872 tcf_exts_miss_cookie_get(miss_cookie_base, i);
3873 }
3874
3875 j += index;
3876
3877 spin_unlock_bh(&act->tcfa_lock);
3878 }
3879
3880err_out:
3881 if (err)
3882 tc_cleanup_offload_action(flow_action);
3883
3884 return err;
3885err_out_locked:
3886 spin_unlock_bh(&act->tcfa_lock);
3887 goto err_out;
3888}
3889
3890int tc_setup_offload_action(struct flow_action *flow_action,
3891 const struct tcf_exts *exts,
3892 struct netlink_ext_ack *extack)
3893{
3894#ifdef CONFIG_NET_CLS_ACT
3895 u32 miss_cookie_base;
3896
3897 if (!exts)
3898 return 0;
3899
3900 miss_cookie_base = exts->miss_cookie_node ?
3901 exts->miss_cookie_node->miss_cookie_base : 0;
3902 return tc_setup_action(flow_action, exts->actions, miss_cookie_base,
3903 extack);
3904#else
3905 return 0;
3906#endif
3907}
3908EXPORT_SYMBOL(tc_setup_offload_action);
3909
3910unsigned int tcf_exts_num_actions(struct tcf_exts *exts)
3911{
3912 unsigned int num_acts = 0;
3913 struct tc_action *act;
3914 int i;
3915
3916 tcf_exts_for_each_action(i, act, exts) {
3917 if (is_tcf_pedit(act))
3918 num_acts += tcf_pedit_nkeys(act);
3919 else
3920 num_acts++;
3921 }
3922 return num_acts;
3923}
3924EXPORT_SYMBOL(tcf_exts_num_actions);
3925
3926#ifdef CONFIG_NET_CLS_ACT
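/* Qevents let a qdisc attach a classifier block to an event it generates
 * (sch_red's early_drop and mark qevents, for example). tcf_qevent_init()
 * binds the block given by the qevent's block index attribute and
 * tcf_qevent_handle() classifies the skb when the event fires.
 */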
3927static int tcf_qevent_parse_block_index(struct nlattr *block_index_attr,
3928 u32 *p_block_index,
3929 struct netlink_ext_ack *extack)
3930{
3931 *p_block_index = nla_get_u32(block_index_attr);
3932 if (!*p_block_index) {
3933 NL_SET_ERR_MSG(extack, "Block number may not be zero");
3934 return -EINVAL;
3935 }
3936
3937 return 0;
3938}
3939
3940int tcf_qevent_init(struct tcf_qevent *qe, struct Qdisc *sch,
3941 enum flow_block_binder_type binder_type,
3942 struct nlattr *block_index_attr,
3943 struct netlink_ext_ack *extack)
3944{
3945 u32 block_index;
3946 int err;
3947
3948 if (!block_index_attr)
3949 return 0;
3950
3951 err = tcf_qevent_parse_block_index(block_index_attr, &block_index, extack);
3952 if (err)
3953 return err;
3954
3955 qe->info.binder_type = binder_type;
3956 qe->info.chain_head_change = tcf_chain_head_change_dflt;
3957 qe->info.chain_head_change_priv = &qe->filter_chain;
3958 qe->info.block_index = block_index;
3959
3960 return tcf_block_get_ext(&qe->block, sch, &qe->info, extack);
3961}
3962EXPORT_SYMBOL(tcf_qevent_init);
3963
3964void tcf_qevent_destroy(struct tcf_qevent *qe, struct Qdisc *sch)
3965{
3966 if (qe->info.block_index)
3967 tcf_block_put_ext(qe->block, sch, &qe->info);
3968}
3969EXPORT_SYMBOL(tcf_qevent_destroy);
3970
3971int tcf_qevent_validate_change(struct tcf_qevent *qe, struct nlattr *block_index_attr,
3972 struct netlink_ext_ack *extack)
3973{
3974 u32 block_index;
3975 int err;
3976
3977 if (!block_index_attr)
3978 return 0;
3979
3980 err = tcf_qevent_parse_block_index(block_index_attr, &block_index, extack);
3981 if (err)
3982 return err;
3983
3984	/* Bounce a newly configured block or a change of block. */
3985 if (block_index != qe->info.block_index) {
3986 NL_SET_ERR_MSG(extack, "Change of blocks is not supported");
3987 return -EINVAL;
3988 }
3989
3990 return 0;
3991}
3992EXPORT_SYMBOL(tcf_qevent_validate_change);
3993
3994struct sk_buff *tcf_qevent_handle(struct tcf_qevent *qe, struct Qdisc *sch, struct sk_buff *skb,
3995 struct sk_buff **to_free, int *ret)
3996{
3997 struct tcf_result cl_res;
3998 struct tcf_proto *fl;
3999
4000 if (!qe->info.block_index)
4001 return skb;
4002
4003 fl = rcu_dereference_bh(qe->filter_chain);
4004
4005 switch (tcf_classify(skb, NULL, fl, &cl_res, false)) {
4006 case TC_ACT_SHOT:
4007 qdisc_qstats_drop(sch);
4008 __qdisc_drop(skb, to_free);
4009 *ret = __NET_XMIT_BYPASS;
4010 return NULL;
4011 case TC_ACT_STOLEN:
4012 case TC_ACT_QUEUED:
4013 case TC_ACT_TRAP:
4014 __qdisc_drop(skb, to_free);
4015 *ret = __NET_XMIT_STOLEN;
4016 return NULL;
4017 case TC_ACT_REDIRECT:
4018 skb_do_redirect(skb);
4019 *ret = __NET_XMIT_STOLEN;
4020 return NULL;
4021 }
4022
4023 return skb;
4024}
4025EXPORT_SYMBOL(tcf_qevent_handle);
4026
4027int tcf_qevent_dump(struct sk_buff *skb, int attr_name, struct tcf_qevent *qe)
4028{
4029 if (!qe->info.block_index)
4030 return 0;
4031 return nla_put_u32(skb, attr_name, qe->info.block_index);
4032}
4033EXPORT_SYMBOL(tcf_qevent_dump);
4034#endif
4035
4036static __net_init int tcf_net_init(struct net *net)
4037{
4038 struct tcf_net *tn = net_generic(net, tcf_net_id);
4039
4040 spin_lock_init(&tn->idr_lock);
4041 idr_init(&tn->idr);
4042 return 0;
4043}
4044
4045static void __net_exit tcf_net_exit(struct net *net)
4046{
4047 struct tcf_net *tn = net_generic(net, tcf_net_id);
4048
4049 idr_destroy(&tn->idr);
4050}
4051
4052static struct pernet_operations tcf_net_ops = {
4053 .init = tcf_net_init,
4054 .exit = tcf_net_exit,
4055 .id = &tcf_net_id,
4056 .size = sizeof(struct tcf_net),
4057};
4058
4059static const struct rtnl_msg_handler tc_filter_rtnl_msg_handlers[] __initconst = {
4060 {.msgtype = RTM_NEWTFILTER, .doit = tc_new_tfilter,
4061 .flags = RTNL_FLAG_DOIT_UNLOCKED},
4062 {.msgtype = RTM_DELTFILTER, .doit = tc_del_tfilter,
4063 .flags = RTNL_FLAG_DOIT_UNLOCKED},
4064 {.msgtype = RTM_GETTFILTER, .doit = tc_get_tfilter,
4065 .dumpit = tc_dump_tfilter, .flags = RTNL_FLAG_DOIT_UNLOCKED},
4066 {.msgtype = RTM_NEWCHAIN, .doit = tc_ctl_chain},
4067 {.msgtype = RTM_DELCHAIN, .doit = tc_ctl_chain},
4068 {.msgtype = RTM_GETCHAIN, .doit = tc_ctl_chain,
4069 .dumpit = tc_dump_chain},
4070};
4071
4072static int __init tc_filter_init(void)
4073{
4074 int err;
4075
4076 tc_filter_wq = alloc_ordered_workqueue("tc_filter_workqueue", 0);
4077 if (!tc_filter_wq)
4078 return -ENOMEM;
4079
4080 err = register_pernet_subsys(&tcf_net_ops);
4081 if (err)
4082 goto err_register_pernet_subsys;
4083
4084 xa_init_flags(&tcf_exts_miss_cookies_xa, XA_FLAGS_ALLOC1);
4085 rtnl_register_many(tc_filter_rtnl_msg_handlers);
4086
4087 return 0;
4088
4089err_register_pernet_subsys:
4090 destroy_workqueue(tc_filter_wq);
4091 return err;
4092}
4093
4094subsys_initcall(tc_filter_init);