// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/sched/cls_api.c	Packet classifier API.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 * Changes:
 *
 * Eduardo J. Blanco <ejbs@netlabs.com.uy> :990222: kmod support
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/kmod.h>
#include <linux/slab.h>
#include <linux/idr.h>
#include <linux/rhashtable.h>
#include <linux/jhash.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>
#include <net/tc_act/tc_pedit.h>
#include <net/tc_act/tc_mirred.h>
#include <net/tc_act/tc_vlan.h>
#include <net/tc_act/tc_tunnel_key.h>
#include <net/tc_act/tc_csum.h>
#include <net/tc_act/tc_gact.h>
#include <net/tc_act/tc_police.h>
#include <net/tc_act/tc_sample.h>
#include <net/tc_act/tc_skbedit.h>
#include <net/tc_act/tc_ct.h>
#include <net/tc_act/tc_mpls.h>
#include <net/flow_offload.h>

extern const struct nla_policy rtm_tca_policy[TCA_MAX + 1];

/* The list of all installed classifier types */
static LIST_HEAD(tcf_proto_base);

/* Protects the list of registered TC modules. It is a pure SMP lock. */
static DEFINE_RWLOCK(cls_mod_lock);

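/* Hash a classifier by chain index, priority and protocol. Used to track
 * tcf_proto instances being destroyed in block->proto_destroy_ht.
 */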
static u32 destroy_obj_hashfn(const struct tcf_proto *tp)
{
	return jhash_3words(tp->chain->index, tp->prio,
			    (__force __u32)tp->protocol, 0);
}

static void tcf_proto_signal_destroying(struct tcf_chain *chain,
					struct tcf_proto *tp)
{
	struct tcf_block *block = chain->block;

	mutex_lock(&block->proto_destroy_lock);
	hash_add_rcu(block->proto_destroy_ht, &tp->destroy_ht_node,
		     destroy_obj_hashfn(tp));
	mutex_unlock(&block->proto_destroy_lock);
}

static bool tcf_proto_cmp(const struct tcf_proto *tp1,
			  const struct tcf_proto *tp2)
{
	return tp1->chain->index == tp2->chain->index &&
	       tp1->prio == tp2->prio &&
	       tp1->protocol == tp2->protocol;
}

static bool tcf_proto_exists_destroying(struct tcf_chain *chain,
					struct tcf_proto *tp)
{
	u32 hash = destroy_obj_hashfn(tp);
	struct tcf_proto *iter;
	bool found = false;

	rcu_read_lock();
	hash_for_each_possible_rcu(chain->block->proto_destroy_ht, iter,
				   destroy_ht_node, hash) {
		if (tcf_proto_cmp(tp, iter)) {
			found = true;
			break;
		}
	}
	rcu_read_unlock();

	return found;
}

static void
tcf_proto_signal_destroyed(struct tcf_chain *chain, struct tcf_proto *tp)
{
	struct tcf_block *block = chain->block;

	mutex_lock(&block->proto_destroy_lock);
	if (hash_hashed(&tp->destroy_ht_node))
		hash_del_rcu(&tp->destroy_ht_node);
	mutex_unlock(&block->proto_destroy_lock);
}

/* Find classifier type by string name */

static const struct tcf_proto_ops *__tcf_proto_lookup_ops(const char *kind)
{
	const struct tcf_proto_ops *t, *res = NULL;

	if (kind) {
		read_lock(&cls_mod_lock);
		list_for_each_entry(t, &tcf_proto_base, head) {
			if (strcmp(kind, t->kind) == 0) {
				if (try_module_get(t->owner))
					res = t;
				break;
			}
		}
		read_unlock(&cls_mod_lock);
	}
	return res;
}

static const struct tcf_proto_ops *
tcf_proto_lookup_ops(const char *kind, bool rtnl_held,
		     struct netlink_ext_ack *extack)
{
	const struct tcf_proto_ops *ops;

	ops = __tcf_proto_lookup_ops(kind);
	if (ops)
		return ops;
#ifdef CONFIG_MODULES
	if (rtnl_held)
		rtnl_unlock();
	request_module("cls_%s", kind);
	if (rtnl_held)
		rtnl_lock();
	ops = __tcf_proto_lookup_ops(kind);
	/* We dropped the RTNL semaphore in order to perform
	 * the module load. So, even if we succeeded in loading
	 * the module we have to replay the request. We indicate
	 * this using -EAGAIN.
	 */
	if (ops) {
		module_put(ops->owner);
		return ERR_PTR(-EAGAIN);
	}
#endif
	NL_SET_ERR_MSG(extack, "TC classifier not found");
	return ERR_PTR(-ENOENT);
}

/* Register (unregister) a new classifier type */

int register_tcf_proto_ops(struct tcf_proto_ops *ops)
{
	struct tcf_proto_ops *t;
	int rc = -EEXIST;

	write_lock(&cls_mod_lock);
	list_for_each_entry(t, &tcf_proto_base, head)
		if (!strcmp(ops->kind, t->kind))
			goto out;

	list_add_tail(&ops->head, &tcf_proto_base);
	rc = 0;
out:
	write_unlock(&cls_mod_lock);
	return rc;
}
EXPORT_SYMBOL(register_tcf_proto_ops);

static struct workqueue_struct *tc_filter_wq;

int unregister_tcf_proto_ops(struct tcf_proto_ops *ops)
{
	struct tcf_proto_ops *t;
	int rc = -ENOENT;

	/* Wait for outstanding call_rcu()s, if any, from a
	 * tcf_proto_ops's destroy() handler.
	 */
	rcu_barrier();
	flush_workqueue(tc_filter_wq);

	write_lock(&cls_mod_lock);
	list_for_each_entry(t, &tcf_proto_base, head) {
		if (t == ops) {
			list_del(&t->head);
			rc = 0;
			break;
		}
	}
	write_unlock(&cls_mod_lock);
	return rc;
}
EXPORT_SYMBOL(unregister_tcf_proto_ops);

bool tcf_queue_work(struct rcu_work *rwork, work_func_t func)
{
	INIT_RCU_WORK(rwork, func);
	return queue_rcu_work(tc_filter_wq, rwork);
}
EXPORT_SYMBOL(tcf_queue_work);

/* Select a new prio value from the range managed by the kernel. */

static inline u32 tcf_auto_prio(struct tcf_proto *tp)
{
	u32 first = TC_H_MAKE(0xC0000000U, 0U);

	if (tp)
		first = tp->prio - 1;

	return TC_H_MAJ(first);
}

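/* Copy the TCA_KIND attribute into name. Returns true if the string would
 * overflow IFNAMSIZ; a missing attribute yields an empty name and false.
 */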
static bool tcf_proto_check_kind(struct nlattr *kind, char *name)
{
	if (kind)
		return nla_strlcpy(name, kind, IFNAMSIZ) >= IFNAMSIZ;
	memset(name, 0, IFNAMSIZ);
	return false;
}

static bool tcf_proto_is_unlocked(const char *kind)
{
	const struct tcf_proto_ops *ops;
	bool ret;

	if (strlen(kind) == 0)
		return false;

	ops = tcf_proto_lookup_ops(kind, false, NULL);
	/* On error return false to take rtnl lock. Proto lookup/create
	 * functions will perform lookup again and properly handle errors.
	 */
	if (IS_ERR(ops))
		return false;

	ret = !!(ops->flags & TCF_PROTO_OPS_DOIT_UNLOCKED);
	module_put(ops->owner);
	return ret;
}

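/* Allocate a new tcf_proto, look up its ops (possibly loading the classifier
 * module) and call the classifier's init(); returns ERR_PTR() on failure.
 */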
static struct tcf_proto *tcf_proto_create(const char *kind, u32 protocol,
					  u32 prio, struct tcf_chain *chain,
					  bool rtnl_held,
					  struct netlink_ext_ack *extack)
{
	struct tcf_proto *tp;
	int err;

	tp = kzalloc(sizeof(*tp), GFP_KERNEL);
	if (!tp)
		return ERR_PTR(-ENOBUFS);

	tp->ops = tcf_proto_lookup_ops(kind, rtnl_held, extack);
	if (IS_ERR(tp->ops)) {
		err = PTR_ERR(tp->ops);
		goto errout;
	}
	tp->classify = tp->ops->classify;
	tp->protocol = protocol;
	tp->prio = prio;
	tp->chain = chain;
	spin_lock_init(&tp->lock);
	refcount_set(&tp->refcnt, 1);

	err = tp->ops->init(tp);
	if (err) {
		module_put(tp->ops->owner);
		goto errout;
	}
	return tp;

errout:
	kfree(tp);
	return ERR_PTR(err);
}

static void tcf_proto_get(struct tcf_proto *tp)
{
	refcount_inc(&tp->refcnt);
}

static void tcf_chain_put(struct tcf_chain *chain);

static void tcf_proto_destroy(struct tcf_proto *tp, bool rtnl_held,
			      bool sig_destroy, struct netlink_ext_ack *extack)
{
	tp->ops->destroy(tp, rtnl_held, extack);
	if (sig_destroy)
		tcf_proto_signal_destroyed(tp->chain, tp);
	tcf_chain_put(tp->chain);
	module_put(tp->ops->owner);
	kfree_rcu(tp, rcu);
}

static void tcf_proto_put(struct tcf_proto *tp, bool rtnl_held,
			  struct netlink_ext_ack *extack)
{
	if (refcount_dec_and_test(&tp->refcnt))
		tcf_proto_destroy(tp, rtnl_held, true, extack);
}

static int walker_check_empty(struct tcf_proto *tp, void *fh,
			      struct tcf_walker *arg)
{
	if (fh) {
		arg->nonempty = true;
		return -1;
	}
	return 0;
}

static bool tcf_proto_is_empty(struct tcf_proto *tp, bool rtnl_held)
{
	struct tcf_walker walker = { .fn = walker_check_empty, };

	if (tp->ops->walk) {
		tp->ops->walk(tp, &walker, rtnl_held);
		return !walker.nonempty;
	}
	return true;
}

static bool tcf_proto_check_delete(struct tcf_proto *tp, bool rtnl_held)
{
	spin_lock(&tp->lock);
	if (tcf_proto_is_empty(tp, rtnl_held))
		tp->deleting = true;
	spin_unlock(&tp->lock);
	return tp->deleting;
}

static void tcf_proto_mark_delete(struct tcf_proto *tp)
{
	spin_lock(&tp->lock);
	tp->deleting = true;
	spin_unlock(&tp->lock);
}

static bool tcf_proto_is_deleting(struct tcf_proto *tp)
{
	bool deleting;

	spin_lock(&tp->lock);
	deleting = tp->deleting;
	spin_unlock(&tp->lock);

	return deleting;
}

#define ASSERT_BLOCK_LOCKED(block)					\
	lockdep_assert_held(&(block)->lock)

struct tcf_filter_chain_list_item {
	struct list_head list;
	tcf_chain_head_change_t *chain_head_change;
	void *chain_head_change_priv;
};

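/* Allocate a chain with the given index and link it into the block's chain
 * list; caller must hold block->lock. Chain 0 is cached in block->chain0.
 */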
static struct tcf_chain *tcf_chain_create(struct tcf_block *block,
					  u32 chain_index)
{
	struct tcf_chain *chain;

	ASSERT_BLOCK_LOCKED(block);

	chain = kzalloc(sizeof(*chain), GFP_KERNEL);
	if (!chain)
		return NULL;
	list_add_tail(&chain->list, &block->chain_list);
	mutex_init(&chain->filter_chain_lock);
	chain->block = block;
	chain->index = chain_index;
	chain->refcnt = 1;
	if (!chain->index)
		block->chain0.chain = chain;
	return chain;
}

static void tcf_chain_head_change_item(struct tcf_filter_chain_list_item *item,
				       struct tcf_proto *tp_head)
{
	if (item->chain_head_change)
		item->chain_head_change(tp_head, item->chain_head_change_priv);
}

static void tcf_chain0_head_change(struct tcf_chain *chain,
				   struct tcf_proto *tp_head)
{
	struct tcf_filter_chain_list_item *item;
	struct tcf_block *block = chain->block;

	if (chain->index)
		return;

	mutex_lock(&block->lock);
	list_for_each_entry(item, &block->chain0.filter_chain_list, list)
		tcf_chain_head_change_item(item, tp_head);
	mutex_unlock(&block->lock);
}

/* Returns true if block can be safely freed. */

static bool tcf_chain_detach(struct tcf_chain *chain)
{
	struct tcf_block *block = chain->block;

	ASSERT_BLOCK_LOCKED(block);

	list_del(&chain->list);
	if (!chain->index)
		block->chain0.chain = NULL;

	if (list_empty(&block->chain_list) &&
	    refcount_read(&block->refcnt) == 0)
		return true;

	return false;
}

static void tcf_block_destroy(struct tcf_block *block)
{
	mutex_destroy(&block->lock);
	mutex_destroy(&block->proto_destroy_lock);
	kfree_rcu(block, rcu);
}

static void tcf_chain_destroy(struct tcf_chain *chain, bool free_block)
{
	struct tcf_block *block = chain->block;

	mutex_destroy(&chain->filter_chain_lock);
	kfree_rcu(chain, rcu);
	if (free_block)
		tcf_block_destroy(block);
}

static void tcf_chain_hold(struct tcf_chain *chain)
{
	ASSERT_BLOCK_LOCKED(chain->block);

	++chain->refcnt;
}

static bool tcf_chain_held_by_acts_only(struct tcf_chain *chain)
{
	ASSERT_BLOCK_LOCKED(chain->block);

	/* In case all the references are action references, this
	 * chain should not be shown to the user.
	 */
	return chain->refcnt == chain->action_refcnt;
}

static struct tcf_chain *tcf_chain_lookup(struct tcf_block *block,
					  u32 chain_index)
{
	struct tcf_chain *chain;

	ASSERT_BLOCK_LOCKED(block);

	list_for_each_entry(chain, &block->chain_list, list) {
		if (chain->index == chain_index)
			return chain;
	}
	return NULL;
}

static int tc_chain_notify(struct tcf_chain *chain, struct sk_buff *oskb,
			   u32 seq, u16 flags, int event, bool unicast);

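/* Look up a chain by index, taking a reference; optionally create it if it
 * does not exist. The first non-action reference triggers an RTM_NEWCHAIN
 * notification to user space.
 */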
static struct tcf_chain *__tcf_chain_get(struct tcf_block *block,
					 u32 chain_index, bool create,
					 bool by_act)
{
	struct tcf_chain *chain = NULL;
	bool is_first_reference;

	mutex_lock(&block->lock);
	chain = tcf_chain_lookup(block, chain_index);
	if (chain) {
		tcf_chain_hold(chain);
	} else {
		if (!create)
			goto errout;
		chain = tcf_chain_create(block, chain_index);
		if (!chain)
			goto errout;
	}

	if (by_act)
		++chain->action_refcnt;
	is_first_reference = chain->refcnt - chain->action_refcnt == 1;
	mutex_unlock(&block->lock);

	/* Send notification only in case we got the first
	 * non-action reference. Until then, the chain acts only as
	 * a placeholder for actions pointing to it, and the user
	 * need not know about it.
	 */
	if (is_first_reference && !by_act)
		tc_chain_notify(chain, NULL, 0, NLM_F_CREATE | NLM_F_EXCL,
				RTM_NEWCHAIN, false);

	return chain;

errout:
	mutex_unlock(&block->lock);
	return chain;
}

static struct tcf_chain *tcf_chain_get(struct tcf_block *block, u32 chain_index,
				       bool create)
{
	return __tcf_chain_get(block, chain_index, create, false);
}

struct tcf_chain *tcf_chain_get_by_act(struct tcf_block *block, u32 chain_index)
{
	return __tcf_chain_get(block, chain_index, true, true);
}
EXPORT_SYMBOL(tcf_chain_get_by_act);

static void tc_chain_tmplt_del(const struct tcf_proto_ops *tmplt_ops,
			       void *tmplt_priv);
static int tc_chain_notify_delete(const struct tcf_proto_ops *tmplt_ops,
				  void *tmplt_priv, u32 chain_index,
				  struct tcf_block *block, struct sk_buff *oskb,
				  u32 seq, u16 flags, bool unicast);

static void __tcf_chain_put(struct tcf_chain *chain, bool by_act,
			    bool explicitly_created)
{
	struct tcf_block *block = chain->block;
	const struct tcf_proto_ops *tmplt_ops;
	bool free_block = false;
	unsigned int refcnt;
	void *tmplt_priv;

	mutex_lock(&block->lock);
	if (explicitly_created) {
		if (!chain->explicitly_created) {
			mutex_unlock(&block->lock);
			return;
		}
		chain->explicitly_created = false;
	}

	if (by_act)
		chain->action_refcnt--;

	/* tc_chain_notify_delete can't be called while holding block lock.
	 * However, when the block is unlocked the chain can be changed
	 * concurrently, so save these to temporary variables.
	 */
	refcnt = --chain->refcnt;
	tmplt_ops = chain->tmplt_ops;
	tmplt_priv = chain->tmplt_priv;

	/* The last dropped non-action reference will trigger notification. */
	if (refcnt - chain->action_refcnt == 0 && !by_act) {
		tc_chain_notify_delete(tmplt_ops, tmplt_priv, chain->index,
				       block, NULL, 0, 0, false);
		/* Last reference to chain, no need to lock. */
		chain->flushing = false;
	}

	if (refcnt == 0)
		free_block = tcf_chain_detach(chain);
	mutex_unlock(&block->lock);

	if (refcnt == 0) {
		tc_chain_tmplt_del(tmplt_ops, tmplt_priv);
		tcf_chain_destroy(chain, free_block);
	}
}

static void tcf_chain_put(struct tcf_chain *chain)
{
	__tcf_chain_put(chain, false, false);
}

void tcf_chain_put_by_act(struct tcf_chain *chain)
{
	__tcf_chain_put(chain, true, false);
}
EXPORT_SYMBOL(tcf_chain_put_by_act);

static void tcf_chain_put_explicitly_created(struct tcf_chain *chain)
{
	__tcf_chain_put(chain, false, true);
}

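/* Unlink every tcf_proto from the chain under filter_chain_lock, then drop
 * the references outside the lock so classifiers can be destroyed safely.
 */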
static void tcf_chain_flush(struct tcf_chain *chain, bool rtnl_held)
{
	struct tcf_proto *tp, *tp_next;

	mutex_lock(&chain->filter_chain_lock);
	tp = tcf_chain_dereference(chain->filter_chain, chain);
	while (tp) {
		tp_next = rcu_dereference_protected(tp->next, 1);
		tcf_proto_signal_destroying(chain, tp);
		tp = tp_next;
	}
	tp = tcf_chain_dereference(chain->filter_chain, chain);
	RCU_INIT_POINTER(chain->filter_chain, NULL);
	tcf_chain0_head_change(chain, NULL);
	chain->flushing = true;
	mutex_unlock(&chain->filter_chain_lock);

	while (tp) {
		tp_next = rcu_dereference_protected(tp->next, 1);
		tcf_proto_put(tp, rtnl_held, NULL);
		tp = tp_next;
	}
}

static int tcf_block_setup(struct tcf_block *block,
			   struct flow_block_offload *bo);

static void tc_indr_block_ing_cmd(struct net_device *dev,
				  struct tcf_block *block,
				  flow_indr_block_bind_cb_t *cb,
				  void *cb_priv,
				  enum flow_block_command command)
{
	struct flow_block_offload bo = {
		.command	= command,
		.binder_type	= FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS,
		.net		= dev_net(dev),
		.block_shared	= tcf_block_non_null_shared(block),
	};
	INIT_LIST_HEAD(&bo.cb_list);

	if (!block)
		return;

	bo.block = &block->flow_block;

	down_write(&block->cb_lock);
	cb(dev, cb_priv, TC_SETUP_BLOCK, &bo);

	tcf_block_setup(block, &bo);
	up_write(&block->cb_lock);
}

static struct tcf_block *tc_dev_ingress_block(struct net_device *dev)
{
	const struct Qdisc_class_ops *cops;
	struct Qdisc *qdisc;

	if (!dev_ingress_queue(dev))
		return NULL;

	qdisc = dev_ingress_queue(dev)->qdisc_sleeping;
	if (!qdisc)
		return NULL;

	cops = qdisc->ops->cl_ops;
	if (!cops)
		return NULL;

	if (!cops->tcf_block)
		return NULL;

	return cops->tcf_block(qdisc, TC_H_MIN_INGRESS, NULL);
}

static void tc_indr_block_get_and_ing_cmd(struct net_device *dev,
					  flow_indr_block_bind_cb_t *cb,
					  void *cb_priv,
					  enum flow_block_command command)
{
	struct tcf_block *block = tc_dev_ingress_block(dev);

	tc_indr_block_ing_cmd(dev, block, cb, cb_priv, command);
}

static void tc_indr_block_call(struct tcf_block *block,
			       struct net_device *dev,
			       struct tcf_block_ext_info *ei,
			       enum flow_block_command command,
			       struct netlink_ext_ack *extack)
{
	struct flow_block_offload bo = {
		.command	= command,
		.binder_type	= ei->binder_type,
		.net		= dev_net(dev),
		.block		= &block->flow_block,
		.block_shared	= tcf_block_shared(block),
		.extack		= extack,
	};
	INIT_LIST_HEAD(&bo.cb_list);

	flow_indr_block_call(dev, &bo, command);
	tcf_block_setup(block, &bo);
}

static bool tcf_block_offload_in_use(struct tcf_block *block)
{
	return atomic_read(&block->offloadcnt);
}

static int tcf_block_offload_cmd(struct tcf_block *block,
				 struct net_device *dev,
				 struct tcf_block_ext_info *ei,
				 enum flow_block_command command,
				 struct netlink_ext_ack *extack)
{
	struct flow_block_offload bo = {};
	int err;

	bo.net = dev_net(dev);
	bo.command = command;
	bo.binder_type = ei->binder_type;
	bo.block = &block->flow_block;
	bo.block_shared = tcf_block_shared(block);
	bo.extack = extack;
	INIT_LIST_HEAD(&bo.cb_list);

	err = dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_BLOCK, &bo);
	if (err < 0)
		return err;

	return tcf_block_setup(block, &bo);
}

static int tcf_block_offload_bind(struct tcf_block *block, struct Qdisc *q,
				  struct tcf_block_ext_info *ei,
				  struct netlink_ext_ack *extack)
{
	struct net_device *dev = q->dev_queue->dev;
	int err;

	down_write(&block->cb_lock);
	if (!dev->netdev_ops->ndo_setup_tc)
		goto no_offload_dev_inc;

	/* If the tc offload feature is disabled and the block we try to bind
	 * to already has some offloaded filters, refuse to bind.
	 */
	if (!tc_can_offload(dev) && tcf_block_offload_in_use(block)) {
		NL_SET_ERR_MSG(extack, "Bind to offloaded block failed as dev has offload disabled");
		err = -EOPNOTSUPP;
		goto err_unlock;
	}

	err = tcf_block_offload_cmd(block, dev, ei, FLOW_BLOCK_BIND, extack);
	if (err == -EOPNOTSUPP)
		goto no_offload_dev_inc;
	if (err)
		goto err_unlock;

	tc_indr_block_call(block, dev, ei, FLOW_BLOCK_BIND, extack);
	up_write(&block->cb_lock);
	return 0;

no_offload_dev_inc:
	if (tcf_block_offload_in_use(block)) {
		err = -EOPNOTSUPP;
		goto err_unlock;
	}
	err = 0;
	block->nooffloaddevcnt++;
	tc_indr_block_call(block, dev, ei, FLOW_BLOCK_BIND, extack);
err_unlock:
	up_write(&block->cb_lock);
	return err;
}

static void tcf_block_offload_unbind(struct tcf_block *block, struct Qdisc *q,
				     struct tcf_block_ext_info *ei)
{
	struct net_device *dev = q->dev_queue->dev;
	int err;

	down_write(&block->cb_lock);
	tc_indr_block_call(block, dev, ei, FLOW_BLOCK_UNBIND, NULL);

	if (!dev->netdev_ops->ndo_setup_tc)
		goto no_offload_dev_dec;
	err = tcf_block_offload_cmd(block, dev, ei, FLOW_BLOCK_UNBIND, NULL);
	if (err == -EOPNOTSUPP)
		goto no_offload_dev_dec;
	up_write(&block->cb_lock);
	return;

no_offload_dev_dec:
	WARN_ON(block->nooffloaddevcnt-- == 0);
	up_write(&block->cb_lock);
}

static int
tcf_chain0_head_change_cb_add(struct tcf_block *block,
			      struct tcf_block_ext_info *ei,
			      struct netlink_ext_ack *extack)
{
	struct tcf_filter_chain_list_item *item;
	struct tcf_chain *chain0;

	item = kmalloc(sizeof(*item), GFP_KERNEL);
	if (!item) {
		NL_SET_ERR_MSG(extack, "Memory allocation for head change callback item failed");
		return -ENOMEM;
	}
	item->chain_head_change = ei->chain_head_change;
	item->chain_head_change_priv = ei->chain_head_change_priv;

	mutex_lock(&block->lock);
	chain0 = block->chain0.chain;
	if (chain0)
		tcf_chain_hold(chain0);
	else
		list_add(&item->list, &block->chain0.filter_chain_list);
	mutex_unlock(&block->lock);

	if (chain0) {
		struct tcf_proto *tp_head;

		mutex_lock(&chain0->filter_chain_lock);

		tp_head = tcf_chain_dereference(chain0->filter_chain, chain0);
		if (tp_head)
			tcf_chain_head_change_item(item, tp_head);

		mutex_lock(&block->lock);
		list_add(&item->list, &block->chain0.filter_chain_list);
		mutex_unlock(&block->lock);

		mutex_unlock(&chain0->filter_chain_lock);
		tcf_chain_put(chain0);
	}

	return 0;
}

static void
tcf_chain0_head_change_cb_del(struct tcf_block *block,
			      struct tcf_block_ext_info *ei)
{
	struct tcf_filter_chain_list_item *item;

	mutex_lock(&block->lock);
	list_for_each_entry(item, &block->chain0.filter_chain_list, list) {
		if ((!ei->chain_head_change && !ei->chain_head_change_priv) ||
		    (item->chain_head_change == ei->chain_head_change &&
		     item->chain_head_change_priv == ei->chain_head_change_priv)) {
			if (block->chain0.chain)
				tcf_chain_head_change_item(item, NULL);
			list_del(&item->list);
			mutex_unlock(&block->lock);

			kfree(item);
			return;
		}
	}
	mutex_unlock(&block->lock);
	WARN_ON(1);
}

struct tcf_net {
	spinlock_t idr_lock; /* Protects idr */
	struct idr idr;
};

static unsigned int tcf_net_id;

static int tcf_block_insert(struct tcf_block *block, struct net *net,
			    struct netlink_ext_ack *extack)
{
	struct tcf_net *tn = net_generic(net, tcf_net_id);
	int err;

	idr_preload(GFP_KERNEL);
	spin_lock(&tn->idr_lock);
	err = idr_alloc_u32(&tn->idr, block, &block->index, block->index,
			    GFP_NOWAIT);
	spin_unlock(&tn->idr_lock);
	idr_preload_end();

	return err;
}

static void tcf_block_remove(struct tcf_block *block, struct net *net)
{
	struct tcf_net *tn = net_generic(net, tcf_net_id);

	spin_lock(&tn->idr_lock);
	idr_remove(&tn->idr, block->index);
	spin_unlock(&tn->idr_lock);
}

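/* Allocate and initialize a block. The qdisc pointer is only recorded for
 * non-shared blocks; shared blocks are looked up by index instead.
 */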
static struct tcf_block *tcf_block_create(struct net *net, struct Qdisc *q,
					  u32 block_index,
					  struct netlink_ext_ack *extack)
{
	struct tcf_block *block;

	block = kzalloc(sizeof(*block), GFP_KERNEL);
	if (!block) {
		NL_SET_ERR_MSG(extack, "Memory allocation for block failed");
		return ERR_PTR(-ENOMEM);
	}
	mutex_init(&block->lock);
	mutex_init(&block->proto_destroy_lock);
	init_rwsem(&block->cb_lock);
	flow_block_init(&block->flow_block);
	INIT_LIST_HEAD(&block->chain_list);
	INIT_LIST_HEAD(&block->owner_list);
	INIT_LIST_HEAD(&block->chain0.filter_chain_list);

	refcount_set(&block->refcnt, 1);
	block->net = net;
	block->index = block_index;

	/* Don't store q pointer for blocks which are shared */
	if (!tcf_block_shared(block))
		block->q = q;
	return block;
}

static struct tcf_block *tcf_block_lookup(struct net *net, u32 block_index)
{
	struct tcf_net *tn = net_generic(net, tcf_net_id);

	return idr_find(&tn->idr, block_index);
}

static struct tcf_block *tcf_block_refcnt_get(struct net *net, u32 block_index)
{
	struct tcf_block *block;

	rcu_read_lock();
	block = tcf_block_lookup(net, block_index);
	if (block && !refcount_inc_not_zero(&block->refcnt))
		block = NULL;
	rcu_read_unlock();

	return block;
}

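/* Return the chain following 'chain' (or the first chain when NULL), taking
 * a reference and skipping chains held only by actions.
 */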
static struct tcf_chain *
__tcf_get_next_chain(struct tcf_block *block, struct tcf_chain *chain)
{
	mutex_lock(&block->lock);
	if (chain)
		chain = list_is_last(&chain->list, &block->chain_list) ?
			NULL : list_next_entry(chain, list);
	else
		chain = list_first_entry_or_null(&block->chain_list,
						 struct tcf_chain, list);

	/* skip all action-only chains */
	while (chain && tcf_chain_held_by_acts_only(chain))
		chain = list_is_last(&chain->list, &block->chain_list) ?
			NULL : list_next_entry(chain, list);

	if (chain)
		tcf_chain_hold(chain);
	mutex_unlock(&block->lock);

	return chain;
}

/* Function to be used by all clients that want to iterate over all chains on
 * a block. It properly obtains block->lock and takes a reference to the chain
 * before returning it. Users of this function must be tolerant to concurrent
 * chain insertion/deletion or ensure that no concurrent chain modification is
 * possible. Note that netlink dump callbacks cannot guarantee a consistent
 * dump because the rtnl lock is released each time the skb is filled with
 * data and sent to user-space.
 */

struct tcf_chain *
tcf_get_next_chain(struct tcf_block *block, struct tcf_chain *chain)
{
	struct tcf_chain *chain_next = __tcf_get_next_chain(block, chain);

	if (chain)
		tcf_chain_put(chain);

	return chain_next;
}
EXPORT_SYMBOL(tcf_get_next_chain);

static struct tcf_proto *
__tcf_get_next_proto(struct tcf_chain *chain, struct tcf_proto *tp)
{
	u32 prio = 0;

	ASSERT_RTNL();
	mutex_lock(&chain->filter_chain_lock);

	if (!tp) {
		tp = tcf_chain_dereference(chain->filter_chain, chain);
	} else if (tcf_proto_is_deleting(tp)) {
		/* 'deleting' flag is set and chain->filter_chain_lock was
		 * unlocked, which means next pointer could be invalid. Restart
		 * search.
		 */
		prio = tp->prio + 1;
		tp = tcf_chain_dereference(chain->filter_chain, chain);

		for (; tp; tp = tcf_chain_dereference(tp->next, chain))
			if (!tp->deleting && tp->prio >= prio)
				break;
	} else {
		tp = tcf_chain_dereference(tp->next, chain);
	}

	if (tp)
		tcf_proto_get(tp);

	mutex_unlock(&chain->filter_chain_lock);

	return tp;
}

/* Function to be used by all clients that want to iterate over all tp's on
 * a chain. Users of this function must be tolerant to concurrent tp
 * insertion/deletion or ensure that no concurrent chain modification is
 * possible. Note that netlink dump callbacks cannot guarantee a consistent
 * dump because the rtnl lock is released each time the skb is filled with
 * data and sent to user-space.
 */

struct tcf_proto *
tcf_get_next_proto(struct tcf_chain *chain, struct tcf_proto *tp,
		   bool rtnl_held)
{
	struct tcf_proto *tp_next = __tcf_get_next_proto(chain, tp);

	if (tp)
		tcf_proto_put(tp, rtnl_held, NULL);

	return tp_next;
}
EXPORT_SYMBOL(tcf_get_next_proto);

static void tcf_block_flush_all_chains(struct tcf_block *block, bool rtnl_held)
{
	struct tcf_chain *chain;

	/* Last reference to block. At this point chains cannot be added or
	 * removed concurrently.
	 */
	for (chain = tcf_get_next_chain(block, NULL);
	     chain;
	     chain = tcf_get_next_chain(block, chain)) {
		tcf_chain_put_explicitly_created(chain);
		tcf_chain_flush(chain, rtnl_held);
	}
}

/* Look up a Qdisc and increment its reference counter.
 * Set parent, if necessary.
 */

static int __tcf_qdisc_find(struct net *net, struct Qdisc **q,
			    u32 *parent, int ifindex, bool rtnl_held,
			    struct netlink_ext_ack *extack)
{
	const struct Qdisc_class_ops *cops;
	struct net_device *dev;
	int err = 0;

	if (ifindex == TCM_IFINDEX_MAGIC_BLOCK)
		return 0;

	rcu_read_lock();

	/* Find link */
	dev = dev_get_by_index_rcu(net, ifindex);
	if (!dev) {
		rcu_read_unlock();
		return -ENODEV;
	}

	/* Find qdisc */
	if (!*parent) {
		*q = dev->qdisc;
		*parent = (*q)->handle;
	} else {
		*q = qdisc_lookup_rcu(dev, TC_H_MAJ(*parent));
		if (!*q) {
			NL_SET_ERR_MSG(extack, "Parent Qdisc doesn't exist");
			err = -EINVAL;
			goto errout_rcu;
		}
	}

	*q = qdisc_refcount_inc_nz(*q);
	if (!*q) {
		NL_SET_ERR_MSG(extack, "Parent Qdisc doesn't exist");
		err = -EINVAL;
		goto errout_rcu;
	}

	/* Is it classful? */
	cops = (*q)->ops->cl_ops;
	if (!cops) {
		NL_SET_ERR_MSG(extack, "Qdisc not classful");
		err = -EINVAL;
		goto errout_qdisc;
	}

	if (!cops->tcf_block) {
		NL_SET_ERR_MSG(extack, "Class doesn't support blocks");
		err = -EOPNOTSUPP;
		goto errout_qdisc;
	}

errout_rcu:
	/* At this point we know that qdisc is not noop_qdisc,
	 * which means that qdisc holds a reference to net_device
	 * and we hold a reference to qdisc, so it is safe to release
	 * rcu read lock.
	 */
	rcu_read_unlock();
	return err;

errout_qdisc:
	rcu_read_unlock();

	if (rtnl_held)
		qdisc_put(*q);
	else
		qdisc_put_unlocked(*q);
	*q = NULL;

	return err;
}

static int __tcf_qdisc_cl_find(struct Qdisc *q, u32 parent, unsigned long *cl,
			       int ifindex, struct netlink_ext_ack *extack)
{
	if (ifindex == TCM_IFINDEX_MAGIC_BLOCK)
		return 0;

	/* Do we search for filter, attached to class? */
	if (TC_H_MIN(parent)) {
		const struct Qdisc_class_ops *cops = q->ops->cl_ops;

		*cl = cops->find(q, parent);
		if (*cl == 0) {
			NL_SET_ERR_MSG(extack, "Specified class doesn't exist");
			return -ENOENT;
		}
	}

	return 0;
}

static struct tcf_block *__tcf_block_find(struct net *net, struct Qdisc *q,
					  unsigned long cl, int ifindex,
					  u32 block_index,
					  struct netlink_ext_ack *extack)
{
	struct tcf_block *block;

	if (ifindex == TCM_IFINDEX_MAGIC_BLOCK) {
		block = tcf_block_refcnt_get(net, block_index);
		if (!block) {
			NL_SET_ERR_MSG(extack, "Block of given index was not found");
			return ERR_PTR(-EINVAL);
		}
	} else {
		const struct Qdisc_class_ops *cops = q->ops->cl_ops;

		block = cops->tcf_block(q, cl, extack);
		if (!block)
			return ERR_PTR(-EINVAL);

		if (tcf_block_shared(block)) {
			NL_SET_ERR_MSG(extack, "This filter block is shared. Please use the block index to manipulate the filters");
			return ERR_PTR(-EOPNOTSUPP);
		}

		/* Always take a reference to the block in order to support
		 * execution of the rules update path of cls API without rtnl
		 * lock. Caller must release the block when it is finished
		 * using it. The 'if' branch of this conditional obtains its
		 * reference by calling tcf_block_refcnt_get().
		 */
		refcount_inc(&block->refcnt);
	}

	return block;
}

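/* Drop a reference to the block; on the last put, remove it from the shared
 * block idr, unbind offloads and flush or free all chains.
 */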
static void __tcf_block_put(struct tcf_block *block, struct Qdisc *q,
			    struct tcf_block_ext_info *ei, bool rtnl_held)
{
	if (refcount_dec_and_mutex_lock(&block->refcnt, &block->lock)) {
		/* Flushing/putting all chains will cause the block to be
		 * deallocated when the last chain is freed. However, if
		 * chain_list is empty, the block has to be manually
		 * deallocated. After the block's reference counter has
		 * reached 0, it is no longer possible to increment it or
		 * add new chains to the block.
		 */
		bool free_block = list_empty(&block->chain_list);

		mutex_unlock(&block->lock);
		if (tcf_block_shared(block))
			tcf_block_remove(block, block->net);

		if (q)
			tcf_block_offload_unbind(block, q, ei);

		if (free_block)
			tcf_block_destroy(block);
		else
			tcf_block_flush_all_chains(block, rtnl_held);
	} else if (q) {
		tcf_block_offload_unbind(block, q, ei);
	}
}

static void tcf_block_refcnt_put(struct tcf_block *block, bool rtnl_held)
{
	__tcf_block_put(block, NULL, NULL, rtnl_held);
}

/* Find tcf block.
 * Set q, parent, cl when appropriate.
 */

static struct tcf_block *tcf_block_find(struct net *net, struct Qdisc **q,
					u32 *parent, unsigned long *cl,
					int ifindex, u32 block_index,
					struct netlink_ext_ack *extack)
{
	struct tcf_block *block;
	int err = 0;

	ASSERT_RTNL();

	err = __tcf_qdisc_find(net, q, parent, ifindex, true, extack);
	if (err)
		goto errout;

	err = __tcf_qdisc_cl_find(*q, *parent, cl, ifindex, extack);
	if (err)
		goto errout_qdisc;

	block = __tcf_block_find(net, *q, *cl, ifindex, block_index, extack);
	if (IS_ERR(block)) {
		err = PTR_ERR(block);
		goto errout_qdisc;
	}

	return block;

errout_qdisc:
	if (*q)
		qdisc_put(*q);
errout:
	*q = NULL;
	return ERR_PTR(err);
}

static void tcf_block_release(struct Qdisc *q, struct tcf_block *block,
			      bool rtnl_held)
{
	if (!IS_ERR_OR_NULL(block))
		tcf_block_refcnt_put(block, rtnl_held);

	if (q) {
		if (rtnl_held)
			qdisc_put(q);
		else
			qdisc_put_unlocked(q);
	}
}

struct tcf_block_owner_item {
	struct list_head list;
	struct Qdisc *q;
	enum flow_block_binder_type binder_type;
};

static void
tcf_block_owner_netif_keep_dst(struct tcf_block *block,
			       struct Qdisc *q,
			       enum flow_block_binder_type binder_type)
{
	if (block->keep_dst &&
	    binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS &&
	    binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS)
		netif_keep_dst(qdisc_dev(q));
}

void tcf_block_netif_keep_dst(struct tcf_block *block)
{
	struct tcf_block_owner_item *item;

	block->keep_dst = true;
	list_for_each_entry(item, &block->owner_list, list)
		tcf_block_owner_netif_keep_dst(block, item->q,
					       item->binder_type);
}
EXPORT_SYMBOL(tcf_block_netif_keep_dst);

static int tcf_block_owner_add(struct tcf_block *block,
			       struct Qdisc *q,
			       enum flow_block_binder_type binder_type)
{
	struct tcf_block_owner_item *item;

	item = kmalloc(sizeof(*item), GFP_KERNEL);
	if (!item)
		return -ENOMEM;
	item->q = q;
	item->binder_type = binder_type;
	list_add(&item->list, &block->owner_list);
	return 0;
}

static void tcf_block_owner_del(struct tcf_block *block,
				struct Qdisc *q,
				enum flow_block_binder_type binder_type)
{
	struct tcf_block_owner_item *item;

	list_for_each_entry(item, &block->owner_list, list) {
		if (item->q == q && item->binder_type == binder_type) {
			list_del(&item->list);
			kfree(item);
			return;
		}
	}
	WARN_ON(1);
}

int tcf_block_get_ext(struct tcf_block **p_block, struct Qdisc *q,
		      struct tcf_block_ext_info *ei,
		      struct netlink_ext_ack *extack)
{
	struct net *net = qdisc_net(q);
	struct tcf_block *block = NULL;
	int err;

	if (ei->block_index)
		/* block_index not 0 means the shared block is requested */
		block = tcf_block_refcnt_get(net, ei->block_index);

	if (!block) {
		block = tcf_block_create(net, q, ei->block_index, extack);
		if (IS_ERR(block))
			return PTR_ERR(block);
		if (tcf_block_shared(block)) {
			err = tcf_block_insert(block, net, extack);
			if (err)
				goto err_block_insert;
		}
	}

	err = tcf_block_owner_add(block, q, ei->binder_type);
	if (err)
		goto err_block_owner_add;

	tcf_block_owner_netif_keep_dst(block, q, ei->binder_type);

	err = tcf_chain0_head_change_cb_add(block, ei, extack);
	if (err)
		goto err_chain0_head_change_cb_add;

	err = tcf_block_offload_bind(block, q, ei, extack);
	if (err)
		goto err_block_offload_bind;

	*p_block = block;
	return 0;

err_block_offload_bind:
	tcf_chain0_head_change_cb_del(block, ei);
err_chain0_head_change_cb_add:
	tcf_block_owner_del(block, q, ei->binder_type);
err_block_owner_add:
err_block_insert:
	tcf_block_refcnt_put(block, true);
	return err;
}
EXPORT_SYMBOL(tcf_block_get_ext);

static void tcf_chain_head_change_dflt(struct tcf_proto *tp_head, void *priv)
{
	struct tcf_proto __rcu **p_filter_chain = priv;

	rcu_assign_pointer(*p_filter_chain, tp_head);
}

int tcf_block_get(struct tcf_block **p_block,
		  struct tcf_proto __rcu **p_filter_chain, struct Qdisc *q,
		  struct netlink_ext_ack *extack)
{
	struct tcf_block_ext_info ei = {
		.chain_head_change = tcf_chain_head_change_dflt,
		.chain_head_change_priv = p_filter_chain,
	};

	WARN_ON(!p_filter_chain);
	return tcf_block_get_ext(p_block, q, &ei, extack);
}
EXPORT_SYMBOL(tcf_block_get);

/* XXX: Standalone actions are not allowed to jump to any chain, and bound
 * actions should all be removed after flushing.
 */
void tcf_block_put_ext(struct tcf_block *block, struct Qdisc *q,
		       struct tcf_block_ext_info *ei)
{
	if (!block)
		return;
	tcf_chain0_head_change_cb_del(block, ei);
	tcf_block_owner_del(block, q, ei->binder_type);

	__tcf_block_put(block, q, ei, true);
}
EXPORT_SYMBOL(tcf_block_put_ext);

void tcf_block_put(struct tcf_block *block)
{
	struct tcf_block_ext_info ei = {0, };

	if (!block)
		return;
	tcf_block_put_ext(block, block->q, &ei);
}

EXPORT_SYMBOL(tcf_block_put);

static int
tcf_block_playback_offloads(struct tcf_block *block, flow_setup_cb_t *cb,
			    void *cb_priv, bool add, bool offload_in_use,
			    struct netlink_ext_ack *extack)
{
	struct tcf_chain *chain, *chain_prev;
	struct tcf_proto *tp, *tp_prev;
	int err;

	lockdep_assert_held(&block->cb_lock);

	for (chain = __tcf_get_next_chain(block, NULL);
	     chain;
	     chain_prev = chain,
		     chain = __tcf_get_next_chain(block, chain),
		     tcf_chain_put(chain_prev)) {
		for (tp = __tcf_get_next_proto(chain, NULL); tp;
		     tp_prev = tp,
			     tp = __tcf_get_next_proto(chain, tp),
			     tcf_proto_put(tp_prev, true, NULL)) {
			if (tp->ops->reoffload) {
				err = tp->ops->reoffload(tp, add, cb, cb_priv,
							 extack);
				if (err && add)
					goto err_playback_remove;
			} else if (add && offload_in_use) {
				err = -EOPNOTSUPP;
				NL_SET_ERR_MSG(extack, "Filter HW offload failed - classifier without re-offloading support");
				goto err_playback_remove;
			}
		}
	}

	return 0;

err_playback_remove:
	tcf_proto_put(tp, true, NULL);
	tcf_chain_put(chain);
	tcf_block_playback_offloads(block, cb, cb_priv, false, offload_in_use,
				    extack);
	return err;
}

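/* Attach the callbacks in 'bo' to the block, replaying existing filters to
 * each new callback; on failure, unwind the callbacks bound so far.
 */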
static int tcf_block_bind(struct tcf_block *block,
			  struct flow_block_offload *bo)
{
	struct flow_block_cb *block_cb, *next;
	int err, i = 0;

	lockdep_assert_held(&block->cb_lock);

	list_for_each_entry(block_cb, &bo->cb_list, list) {
		err = tcf_block_playback_offloads(block, block_cb->cb,
						  block_cb->cb_priv, true,
						  tcf_block_offload_in_use(block),
						  bo->extack);
		if (err)
			goto err_unroll;
		if (!bo->unlocked_driver_cb)
			block->lockeddevcnt++;

		i++;
	}
	list_splice(&bo->cb_list, &block->flow_block.cb_list);

	return 0;

err_unroll:
	list_for_each_entry_safe(block_cb, next, &bo->cb_list, list) {
		if (i-- > 0) {
			list_del(&block_cb->list);
			tcf_block_playback_offloads(block, block_cb->cb,
						    block_cb->cb_priv, false,
						    tcf_block_offload_in_use(block),
						    NULL);
			if (!bo->unlocked_driver_cb)
				block->lockeddevcnt--;
		}
		flow_block_cb_free(block_cb);
	}

	return err;
}

static void tcf_block_unbind(struct tcf_block *block,
			     struct flow_block_offload *bo)
{
	struct flow_block_cb *block_cb, *next;

	lockdep_assert_held(&block->cb_lock);

	list_for_each_entry_safe(block_cb, next, &bo->cb_list, list) {
		tcf_block_playback_offloads(block, block_cb->cb,
					    block_cb->cb_priv, false,
					    tcf_block_offload_in_use(block),
					    NULL);
		list_del(&block_cb->list);
		flow_block_cb_free(block_cb);
		if (!bo->unlocked_driver_cb)
			block->lockeddevcnt--;
	}
}

static int tcf_block_setup(struct tcf_block *block,
			   struct flow_block_offload *bo)
{
	int err;

	switch (bo->command) {
	case FLOW_BLOCK_BIND:
		err = tcf_block_bind(block, bo);
		break;
	case FLOW_BLOCK_UNBIND:
		err = 0;
		tcf_block_unbind(block, bo);
		break;
	default:
		WARN_ON_ONCE(1);
		err = -EOPNOTSUPP;
	}

	return err;
}

/* Main classifier routine: scans the classifier chain attached
 * to this qdisc, (optionally) tests for protocol and asks
 * specific classifiers.
 */
int tcf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
		 struct tcf_result *res, bool compat_mode)
{
#ifdef CONFIG_NET_CLS_ACT
	const int max_reclassify_loop = 4;
	const struct tcf_proto *orig_tp = tp;
	const struct tcf_proto *first_tp;
	int limit = 0;

reclassify:
#endif
	for (; tp; tp = rcu_dereference_bh(tp->next)) {
		__be16 protocol = tc_skb_protocol(skb);
		int err;

		if (tp->protocol != protocol &&
		    tp->protocol != htons(ETH_P_ALL))
			continue;

		err = tp->classify(skb, tp, res);
#ifdef CONFIG_NET_CLS_ACT
		if (unlikely(err == TC_ACT_RECLASSIFY && !compat_mode)) {
			first_tp = orig_tp;
			goto reset;
		} else if (unlikely(TC_ACT_EXT_CMP(err, TC_ACT_GOTO_CHAIN))) {
			first_tp = res->goto_tp;

#if IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
			{
				struct tc_skb_ext *ext;

				ext = skb_ext_add(skb, TC_SKB_EXT);
				if (WARN_ON_ONCE(!ext))
					return TC_ACT_SHOT;

				ext->chain = err & TC_ACT_EXT_VAL_MASK;
			}
#endif
			goto reset;
		}
#endif
		if (err >= 0)
			return err;
	}

	return TC_ACT_UNSPEC; /* signal: continue lookup */
#ifdef CONFIG_NET_CLS_ACT
reset:
	if (unlikely(limit++ >= max_reclassify_loop)) {
		net_notice_ratelimited("%u: reclassify loop, rule prio %u, protocol %02x\n",
				       tp->chain->block->index,
				       tp->prio & 0xffff,
				       ntohs(tp->protocol));
		return TC_ACT_SHOT;
	}

	tp = first_tp;
	goto reclassify;
#endif
}
EXPORT_SYMBOL(tcf_classify);

struct tcf_chain_info {
	struct tcf_proto __rcu **pprev;
	struct tcf_proto __rcu *next;
};

static struct tcf_proto *tcf_chain_tp_prev(struct tcf_chain *chain,
					   struct tcf_chain_info *chain_info)
{
	return tcf_chain_dereference(*chain_info->pprev, chain);
}

static int tcf_chain_tp_insert(struct tcf_chain *chain,
			       struct tcf_chain_info *chain_info,
			       struct tcf_proto *tp)
{
	if (chain->flushing)
		return -EAGAIN;

	if (*chain_info->pprev == chain->filter_chain)
		tcf_chain0_head_change(chain, tp);
	tcf_proto_get(tp);
	RCU_INIT_POINTER(tp->next, tcf_chain_tp_prev(chain, chain_info));
	rcu_assign_pointer(*chain_info->pprev, tp);

	return 0;
}

static void tcf_chain_tp_remove(struct tcf_chain *chain,
				struct tcf_chain_info *chain_info,
				struct tcf_proto *tp)
{
	struct tcf_proto *next = tcf_chain_dereference(chain_info->next, chain);

	tcf_proto_mark_delete(tp);
	if (tp == chain->filter_chain)
		tcf_chain0_head_change(chain, next);
	RCU_INIT_POINTER(*chain_info->pprev, next);
}

static struct tcf_proto *tcf_chain_tp_find(struct tcf_chain *chain,
					   struct tcf_chain_info *chain_info,
					   u32 protocol, u32 prio,
					   bool prio_allocate);

/* Try to insert new proto.
 * If proto with specified priority already exists, free new proto
 * and return existing one.
 */

static struct tcf_proto *tcf_chain_tp_insert_unique(struct tcf_chain *chain,
						    struct tcf_proto *tp_new,
						    u32 protocol, u32 prio,
						    bool rtnl_held)
{
	struct tcf_chain_info chain_info;
	struct tcf_proto *tp;
	int err = 0;

	mutex_lock(&chain->filter_chain_lock);

	if (tcf_proto_exists_destroying(chain, tp_new)) {
		mutex_unlock(&chain->filter_chain_lock);
		tcf_proto_destroy(tp_new, rtnl_held, false, NULL);
		return ERR_PTR(-EAGAIN);
	}

	tp = tcf_chain_tp_find(chain, &chain_info,
			       protocol, prio, false);
	if (!tp)
		err = tcf_chain_tp_insert(chain, &chain_info, tp_new);
	mutex_unlock(&chain->filter_chain_lock);

	if (tp) {
		tcf_proto_destroy(tp_new, rtnl_held, false, NULL);
		tp_new = tp;
	} else if (err) {
		tcf_proto_destroy(tp_new, rtnl_held, false, NULL);
		tp_new = ERR_PTR(err);
	}

	return tp_new;
}

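/* Remove tp from the chain if it is empty and no new filters were inserted
 * concurrently; the last reference is dropped outside filter_chain_lock.
 */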
static void tcf_chain_tp_delete_empty(struct tcf_chain *chain,
				      struct tcf_proto *tp, bool rtnl_held,
				      struct netlink_ext_ack *extack)
{
	struct tcf_chain_info chain_info;
	struct tcf_proto *tp_iter;
	struct tcf_proto **pprev;
	struct tcf_proto *next;

	mutex_lock(&chain->filter_chain_lock);

	/* Atomically find and remove tp from chain. */
	for (pprev = &chain->filter_chain;
	     (tp_iter = tcf_chain_dereference(*pprev, chain));
	     pprev = &tp_iter->next) {
		if (tp_iter == tp) {
			chain_info.pprev = pprev;
			chain_info.next = tp_iter->next;
			WARN_ON(tp_iter->deleting);
			break;
		}
	}
	/* Verify that tp still exists and no new filters were inserted
	 * concurrently.
	 * Mark tp for deletion if it is empty.
	 */
	if (!tp_iter || !tcf_proto_check_delete(tp, rtnl_held)) {
		mutex_unlock(&chain->filter_chain_lock);
		return;
	}

	tcf_proto_signal_destroying(chain, tp);
	next = tcf_chain_dereference(chain_info.next, chain);
	if (tp == chain->filter_chain)
		tcf_chain0_head_change(chain, next);
	RCU_INIT_POINTER(*chain_info.pprev, next);
	mutex_unlock(&chain->filter_chain_lock);

	tcf_proto_put(tp, rtnl_held, extack);
}

static struct tcf_proto *tcf_chain_tp_find(struct tcf_chain *chain,
					   struct tcf_chain_info *chain_info,
					   u32 protocol, u32 prio,
					   bool prio_allocate)
{
	struct tcf_proto **pprev;
	struct tcf_proto *tp;

	/* Check the chain for existence of proto-tcf with this priority */
	for (pprev = &chain->filter_chain;
	     (tp = tcf_chain_dereference(*pprev, chain));
	     pprev = &tp->next) {
		if (tp->prio >= prio) {
			if (tp->prio == prio) {
				if (prio_allocate ||
				    (tp->protocol != protocol && protocol))
					return ERR_PTR(-EINVAL);
			} else {
				tp = NULL;
			}
			break;
		}
	}
	chain_info->pprev = pprev;
	if (tp) {
		chain_info->next = tp->next;
		tcf_proto_get(tp);
	} else {
		chain_info->next = NULL;
	}
	return tp;
}

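/* Fill a netlink message with a single filter; used for both notifications
 * and dump replies.
 */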
static int tcf_fill_node(struct net *net, struct sk_buff *skb,
			 struct tcf_proto *tp, struct tcf_block *block,
			 struct Qdisc *q, u32 parent, void *fh,
			 u32 portid, u32 seq, u16 flags, int event,
			 bool rtnl_held)
{
	struct tcmsg *tcm;
	struct nlmsghdr  *nlh;
	unsigned char *b = skb_tail_pointer(skb);

	nlh = nlmsg_put(skb, portid, seq, event, sizeof(*tcm), flags);
	if (!nlh)
		goto out_nlmsg_trim;
	tcm = nlmsg_data(nlh);
	tcm->tcm_family = AF_UNSPEC;
	tcm->tcm__pad1 = 0;
	tcm->tcm__pad2 = 0;
	if (q) {
		tcm->tcm_ifindex = qdisc_dev(q)->ifindex;
		tcm->tcm_parent = parent;
	} else {
		tcm->tcm_ifindex = TCM_IFINDEX_MAGIC_BLOCK;
		tcm->tcm_block_index = block->index;
	}
	tcm->tcm_info = TC_H_MAKE(tp->prio, tp->protocol);
	if (nla_put_string(skb, TCA_KIND, tp->ops->kind))
		goto nla_put_failure;
	if (nla_put_u32(skb, TCA_CHAIN, tp->chain->index))
		goto nla_put_failure;
	if (!fh) {
		tcm->tcm_handle = 0;
	} else {
		if (tp->ops->dump &&
		    tp->ops->dump(net, tp, fh, skb, tcm, rtnl_held) < 0)
			goto nla_put_failure;
	}
	nlh->nlmsg_len = skb_tail_pointer(skb) - b;
	return skb->len;

out_nlmsg_trim:
nla_put_failure:
	nlmsg_trim(skb, b);
	return -1;
}

static int tfilter_notify(struct net *net, struct sk_buff *oskb,
			  struct nlmsghdr *n, struct tcf_proto *tp,
			  struct tcf_block *block, struct Qdisc *q,
			  u32 parent, void *fh, int event, bool unicast,
			  bool rtnl_held)
{
	struct sk_buff *skb;
	u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
	int err = 0;

	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb)
		return -ENOBUFS;

	if (tcf_fill_node(net, skb, tp, block, q, parent, fh, portid,
			  n->nlmsg_seq, n->nlmsg_flags, event,
			  rtnl_held) <= 0) {
		kfree_skb(skb);
		return -EINVAL;
	}

	if (unicast)
		err = netlink_unicast(net->rtnl, skb, portid, MSG_DONTWAIT);
	else
		err = rtnetlink_send(skb, net, portid, RTNLGRP_TC,
				     n->nlmsg_flags & NLM_F_ECHO);

	if (err > 0)
		err = 0;
	return err;
}

static int tfilter_del_notify(struct net *net, struct sk_buff *oskb,
			      struct nlmsghdr *n, struct tcf_proto *tp,
			      struct tcf_block *block, struct Qdisc *q,
			      u32 parent, void *fh, bool unicast, bool *last,
			      bool rtnl_held, struct netlink_ext_ack *extack)
{
	struct sk_buff *skb;
	u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
	int err;

	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb)
		return -ENOBUFS;

	if (tcf_fill_node(net, skb, tp, block, q, parent, fh, portid,
			  n->nlmsg_seq, n->nlmsg_flags, RTM_DELTFILTER,
			  rtnl_held) <= 0) {
		NL_SET_ERR_MSG(extack, "Failed to build del event notification");
		kfree_skb(skb);
		return -EINVAL;
	}

	err = tp->ops->delete(tp, fh, last, rtnl_held, extack);
	if (err) {
		kfree_skb(skb);
		return err;
	}

	if (unicast)
		err = netlink_unicast(net->rtnl, skb, portid, MSG_DONTWAIT);
	else
		err = rtnetlink_send(skb, net, portid, RTNLGRP_TC,
				     n->nlmsg_flags & NLM_F_ECHO);
	if (err < 0)
		NL_SET_ERR_MSG(extack, "Failed to send filter delete notification");

	if (err > 0)
		err = 0;
	return err;
}

static void tfilter_notify_chain(struct net *net, struct sk_buff *oskb,
				 struct tcf_block *block, struct Qdisc *q,
				 u32 parent, struct nlmsghdr *n,
				 struct tcf_chain *chain, int event,
				 bool rtnl_held)
{
	struct tcf_proto *tp;

	for (tp = tcf_get_next_proto(chain, NULL, rtnl_held);
	     tp; tp = tcf_get_next_proto(chain, tp, rtnl_held))
		tfilter_notify(net, oskb, n, tp, block,
			       q, parent, NULL, event, false, rtnl_held);
}

static void tfilter_put(struct tcf_proto *tp, void *fh)
{
	if (tp->ops->put && fh)
		tp->ops->put(tp, fh);
}

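/* RTM_NEWTFILTER handler: resolve the qdisc, block and chain, create the
 * classifier instance if needed, and hand the change off to the classifier.
 * Runs without the rtnl lock when both the qdisc and classifier support it.
 */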
1926static int tc_new_tfilter(struct sk_buff *skb, struct nlmsghdr *n,
1927			  struct netlink_ext_ack *extack)
1928{
1929	struct net *net = sock_net(skb->sk);
1930	struct nlattr *tca[TCA_MAX + 1];
1931	char name[IFNAMSIZ];
1932	struct tcmsg *t;
1933	u32 protocol;
1934	u32 prio;
1935	bool prio_allocate;
1936	u32 parent;
1937	u32 chain_index;
1938	struct Qdisc *q = NULL;
1939	struct tcf_chain_info chain_info;
1940	struct tcf_chain *chain = NULL;
1941	struct tcf_block *block;
1942	struct tcf_proto *tp;
1943	unsigned long cl;
1944	void *fh;
1945	int err;
1946	int tp_created;
1947	bool rtnl_held = false;
1948
1949	if (!netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN))
1950		return -EPERM;
1951
1952replay:
1953	tp_created = 0;
1954
1955	err = nlmsg_parse_deprecated(n, sizeof(*t), tca, TCA_MAX,
1956				     rtm_tca_policy, extack);
1957	if (err < 0)
1958		return err;
1959
1960	t = nlmsg_data(n);
1961	protocol = TC_H_MIN(t->tcm_info);
1962	prio = TC_H_MAJ(t->tcm_info);
1963	prio_allocate = false;
1964	parent = t->tcm_parent;
1965	tp = NULL;
1966	cl = 0;
1967	block = NULL;
1968
1969	if (prio == 0) {
1970		/* If no priority is provided by the user,
1971		 * we allocate one.
1972		 */
1973		if (n->nlmsg_flags & NLM_F_CREATE) {
1974			prio = TC_H_MAKE(0x80000000U, 0U);
1975			prio_allocate = true;
1976		} else {
1977			NL_SET_ERR_MSG(extack, "Invalid filter command with priority of zero");
1978			return -ENOENT;
1979		}
1980	}
1981
1982	/* Find head of filter chain. */
1983
1984	err = __tcf_qdisc_find(net, &q, &parent, t->tcm_ifindex, false, extack);
1985	if (err)
1986		return err;
1987
1988	if (tcf_proto_check_kind(tca[TCA_KIND], name)) {
1989		NL_SET_ERR_MSG(extack, "Specified TC filter name too long");
1990		err = -EINVAL;
1991		goto errout;
1992	}
1993
1994	/* Take the rtnl mutex if rtnl_held was set to true on a previous
1995	 * iteration, the block is shared (no qdisc found), the qdisc is not
1996	 * unlocked, the classifier type is not specified, or the classifier
1997	 * is not unlocked.
	 */
1998	if (rtnl_held ||
1999	    (q && !(q->ops->cl_ops->flags & QDISC_CLASS_OPS_DOIT_UNLOCKED)) ||
2000	    !tcf_proto_is_unlocked(name)) {
2001		rtnl_held = true;
2002		rtnl_lock();
2003	}
2004
2005	err = __tcf_qdisc_cl_find(q, parent, &cl, t->tcm_ifindex, extack);
2006	if (err)
2007		goto errout;
2008
2009	block = __tcf_block_find(net, q, cl, t->tcm_ifindex, t->tcm_block_index,
2010				 extack);
2011	if (IS_ERR(block)) {
2012		err = PTR_ERR(block);
2013		goto errout;
2014	}
2015
2016	chain_index = tca[TCA_CHAIN] ? nla_get_u32(tca[TCA_CHAIN]) : 0;
2017	if (chain_index > TC_ACT_EXT_VAL_MASK) {
2018		NL_SET_ERR_MSG(extack, "Specified chain index exceeds upper limit");
2019		err = -EINVAL;
2020		goto errout;
2021	}
2022	chain = tcf_chain_get(block, chain_index, true);
2023	if (!chain) {
2024		NL_SET_ERR_MSG(extack, "Cannot create specified filter chain");
2025		err = -ENOMEM;
2026		goto errout;
2027	}
2028
2029	mutex_lock(&chain->filter_chain_lock);
2030	tp = tcf_chain_tp_find(chain, &chain_info, protocol,
2031			       prio, prio_allocate);
2032	if (IS_ERR(tp)) {
2033		NL_SET_ERR_MSG(extack, "Filter with specified priority/protocol not found");
2034		err = PTR_ERR(tp);
2035		goto errout_locked;
2036	}
2037
2038	if (tp == NULL) {
2039		struct tcf_proto *tp_new = NULL;
2040
2041		if (chain->flushing) {
2042			err = -EAGAIN;
2043			goto errout_locked;
2044		}
2045
2046		/* Proto-tcf does not exist, create a new one */
2047
2048		if (tca[TCA_KIND] == NULL || !protocol) {
2049			NL_SET_ERR_MSG(extack, "Filter kind and protocol must be specified");
2050			err = -EINVAL;
2051			goto errout_locked;
2052		}
2053
2054		if (!(n->nlmsg_flags & NLM_F_CREATE)) {
2055			NL_SET_ERR_MSG(extack, "Need both RTM_NEWTFILTER and NLM_F_CREATE to create a new filter");
2056			err = -ENOENT;
2057			goto errout_locked;
2058		}
2059
2060		if (prio_allocate)
2061			prio = tcf_auto_prio(tcf_chain_tp_prev(chain,
2062							       &chain_info));
2063
2064		mutex_unlock(&chain->filter_chain_lock);
2065		tp_new = tcf_proto_create(nla_data(tca[TCA_KIND]),
2066					  protocol, prio, chain, rtnl_held,
2067					  extack);
2068		if (IS_ERR(tp_new)) {
2069			err = PTR_ERR(tp_new);
2070			goto errout_tp;
2071		}
2072
2073		tp_created = 1;
2074		tp = tcf_chain_tp_insert_unique(chain, tp_new, protocol, prio,
2075						rtnl_held);
2076		if (IS_ERR(tp)) {
2077			err = PTR_ERR(tp);
2078			goto errout_tp;
2079		}
2080	} else {
2081		mutex_unlock(&chain->filter_chain_lock);
2082	}
2083
2084	if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], tp->ops->kind)) {
2085		NL_SET_ERR_MSG(extack, "Specified filter kind does not match existing one");
2086		err = -EINVAL;
2087		goto errout;
2088	}
2089
2090	fh = tp->ops->get(tp, t->tcm_handle);
2091
2092	if (!fh) {
2093		if (!(n->nlmsg_flags & NLM_F_CREATE)) {
2094			NL_SET_ERR_MSG(extack, "Need both RTM_NEWTFILTER and NLM_F_CREATE to create a new filter");
2095			err = -ENOENT;
2096			goto errout;
2097		}
2098	} else if (n->nlmsg_flags & NLM_F_EXCL) {
2099		tfilter_put(tp, fh);
2100		NL_SET_ERR_MSG(extack, "Filter already exists");
2101		err = -EEXIST;
2102		goto errout;
2103	}
2104
2105	if (chain->tmplt_ops && chain->tmplt_ops != tp->ops) {
2106		NL_SET_ERR_MSG(extack, "Chain template is set to a different filter kind");
2107		err = -EINVAL;
2108		goto errout;
2109	}
2110
2111	err = tp->ops->change(net, skb, tp, cl, t->tcm_handle, tca, &fh,
2112			      n->nlmsg_flags & NLM_F_CREATE ? TCA_ACT_NOREPLACE : TCA_ACT_REPLACE,
2113			      rtnl_held, extack);
2114	if (err == 0) {
2115		tfilter_notify(net, skb, n, tp, block, q, parent, fh,
2116			       RTM_NEWTFILTER, false, rtnl_held);
2117		tfilter_put(tp, fh);
2118		/* q pointer is NULL for shared blocks */
2119		if (q)
2120			q->flags &= ~TCQ_F_CAN_BYPASS;
2121	}
2122
2123errout:
2124	if (err && tp_created)
2125		tcf_chain_tp_delete_empty(chain, tp, rtnl_held, NULL);
2126errout_tp:
2127	if (chain) {
2128		if (tp && !IS_ERR(tp))
2129			tcf_proto_put(tp, rtnl_held, NULL);
2130		if (!tp_created)
2131			tcf_chain_put(chain);
2132	}
2133	tcf_block_release(q, block, rtnl_held);
2134
2135	if (rtnl_held)
2136		rtnl_unlock();
2137
2138	if (err == -EAGAIN) {
2139		/* Take rtnl lock in case EAGAIN is caused by concurrent flush
2140		 * of target chain.
2141		 */
2142		rtnl_held = true;
2143		/* Replay the request. */
2144		goto replay;
2145	}
2146	return err;
2147
2148errout_locked:
2149	mutex_unlock(&chain->filter_chain_lock);
2150	goto errout;
2151}
2152
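/* RTM_DELTFILTER handler. A zero priority means "flush": every classifier
 * on the chain is notified and destroyed. Otherwise either the whole
 * tcf_proto (no handle given) or a single filter within it is deleted.
 */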
2153static int tc_del_tfilter(struct sk_buff *skb, struct nlmsghdr *n,
2154			  struct netlink_ext_ack *extack)
2155{
2156	struct net *net = sock_net(skb->sk);
2157	struct nlattr *tca[TCA_MAX + 1];
2158	char name[IFNAMSIZ];
2159	struct tcmsg *t;
2160	u32 protocol;
2161	u32 prio;
2162	u32 parent;
2163	u32 chain_index;
2164	struct Qdisc *q = NULL;
2165	struct tcf_chain_info chain_info;
2166	struct tcf_chain *chain = NULL;
2167	struct tcf_block *block = NULL;
2168	struct tcf_proto *tp = NULL;
2169	unsigned long cl = 0;
2170	void *fh = NULL;
2171	int err;
2172	bool rtnl_held = false;
2173
2174	if (!netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN))
2175		return -EPERM;
2176
2177	err = nlmsg_parse_deprecated(n, sizeof(*t), tca, TCA_MAX,
2178				     rtm_tca_policy, extack);
2179	if (err < 0)
2180		return err;
2181
2182	t = nlmsg_data(n);
2183	protocol = TC_H_MIN(t->tcm_info);
2184	prio = TC_H_MAJ(t->tcm_info);
2185	parent = t->tcm_parent;
2186
2187	if (prio == 0 && (protocol || t->tcm_handle || tca[TCA_KIND])) {
2188		NL_SET_ERR_MSG(extack, "Cannot flush filters with protocol, handle or kind set");
2189		return -ENOENT;
2190	}
2191
2192	/* Find head of filter chain. */
2193
2194	err = __tcf_qdisc_find(net, &q, &parent, t->tcm_ifindex, false, extack);
2195	if (err)
2196		return err;
2197
2198	if (tcf_proto_check_kind(tca[TCA_KIND], name)) {
2199		NL_SET_ERR_MSG(extack, "Specified TC filter name too long");
2200		err = -EINVAL;
2201		goto errout;
2202	}
2203	/* Take the rtnl mutex if flushing the whole chain, the block is shared
2204	 * (no qdisc found), the qdisc is not unlocked, the classifier type is
2205	 * not specified, or the classifier is not unlocked.
2206	 */
2207	if (!prio ||
2208	    (q && !(q->ops->cl_ops->flags & QDISC_CLASS_OPS_DOIT_UNLOCKED)) ||
2209	    !tcf_proto_is_unlocked(name)) {
2210		rtnl_held = true;
2211		rtnl_lock();
2212	}
2213
2214	err = __tcf_qdisc_cl_find(q, parent, &cl, t->tcm_ifindex, extack);
2215	if (err)
2216		goto errout;
2217
2218	block = __tcf_block_find(net, q, cl, t->tcm_ifindex, t->tcm_block_index,
2219				 extack);
2220	if (IS_ERR(block)) {
2221		err = PTR_ERR(block);
2222		goto errout;
2223	}
2224
2225	chain_index = tca[TCA_CHAIN] ? nla_get_u32(tca[TCA_CHAIN]) : 0;
2226	if (chain_index > TC_ACT_EXT_VAL_MASK) {
2227		NL_SET_ERR_MSG(extack, "Specified chain index exceeds upper limit");
2228		err = -EINVAL;
2229		goto errout;
2230	}
2231	chain = tcf_chain_get(block, chain_index, false);
2232	if (!chain) {
2233		/* User requested flush on non-existent chain. Nothing to do,
2234		 * so just return success.
2235		 */
2236		if (prio == 0) {
2237			err = 0;
2238			goto errout;
2239		}
2240		NL_SET_ERR_MSG(extack, "Cannot find specified filter chain");
2241		err = -ENOENT;
2242		goto errout;
2243	}
2244
2245	if (prio == 0) {
2246		tfilter_notify_chain(net, skb, block, q, parent, n,
2247				     chain, RTM_DELTFILTER, rtnl_held);
2248		tcf_chain_flush(chain, rtnl_held);
2249		err = 0;
2250		goto errout;
2251	}
2252
2253	mutex_lock(&chain->filter_chain_lock);
2254	tp = tcf_chain_tp_find(chain, &chain_info, protocol,
2255			       prio, false);
2256	if (!tp || IS_ERR(tp)) {
2257		NL_SET_ERR_MSG(extack, "Filter with specified priority/protocol not found");
2258		err = tp ? PTR_ERR(tp) : -ENOENT;
2259		goto errout_locked;
2260	} else if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], tp->ops->kind)) {
2261		NL_SET_ERR_MSG(extack, "Specified filter kind does not match existing one");
2262		err = -EINVAL;
2263		goto errout_locked;
2264	} else if (t->tcm_handle == 0) {
2265		tcf_proto_signal_destroying(chain, tp);
2266		tcf_chain_tp_remove(chain, &chain_info, tp);
2267		mutex_unlock(&chain->filter_chain_lock);
2268
2269		tcf_proto_put(tp, rtnl_held, NULL);
2270		tfilter_notify(net, skb, n, tp, block, q, parent, fh,
2271			       RTM_DELTFILTER, false, rtnl_held);
2272		err = 0;
2273		goto errout;
2274	}
2275	mutex_unlock(&chain->filter_chain_lock);
2276
2277	fh = tp->ops->get(tp, t->tcm_handle);
2278
2279	if (!fh) {
2280		NL_SET_ERR_MSG(extack, "Specified filter handle not found");
2281		err = -ENOENT;
2282	} else {
2283		bool last;
2284
2285		err = tfilter_del_notify(net, skb, n, tp, block,
2286					 q, parent, fh, false, &last,
2287					 rtnl_held, extack);
2288
2289		if (err)
2290			goto errout;
2291		if (last)
2292			tcf_chain_tp_delete_empty(chain, tp, rtnl_held, extack);
2293	}
2294
2295errout:
2296	if (chain) {
2297		if (tp && !IS_ERR(tp))
2298			tcf_proto_put(tp, rtnl_held, NULL);
2299		tcf_chain_put(chain);
2300	}
2301	tcf_block_release(q, block, rtnl_held);
2302
2303	if (rtnl_held)
2304		rtnl_unlock();
2305
2306	return err;
2307
2308errout_locked:
2309	mutex_unlock(&chain->filter_chain_lock);
2310	goto errout;
2311}
2312
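/* RTM_GETTFILTER handler: look up a single filter and unicast it back to
 * the requesting socket. Read-only, hence no CAP_NET_ADMIN check.
 */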
2313static int tc_get_tfilter(struct sk_buff *skb, struct nlmsghdr *n,
2314			  struct netlink_ext_ack *extack)
2315{
2316	struct net *net = sock_net(skb->sk);
2317	struct nlattr *tca[TCA_MAX + 1];
2318	char name[IFNAMSIZ];
2319	struct tcmsg *t;
2320	u32 protocol;
2321	u32 prio;
2322	u32 parent;
2323	u32 chain_index;
2324	struct Qdisc *q = NULL;
2325	struct tcf_chain_info chain_info;
2326	struct tcf_chain *chain = NULL;
2327	struct tcf_block *block = NULL;
2328	struct tcf_proto *tp = NULL;
2329	unsigned long cl = 0;
2330	void *fh = NULL;
2331	int err;
2332	bool rtnl_held = false;
2333
2334	err = nlmsg_parse_deprecated(n, sizeof(*t), tca, TCA_MAX,
2335				     rtm_tca_policy, extack);
2336	if (err < 0)
2337		return err;
2338
2339	t = nlmsg_data(n);
2340	protocol = TC_H_MIN(t->tcm_info);
2341	prio = TC_H_MAJ(t->tcm_info);
2342	parent = t->tcm_parent;
2343
2344	if (prio == 0) {
2345		NL_SET_ERR_MSG(extack, "Invalid filter command with priority of zero");
2346		return -ENOENT;
2347	}
2348
2349	/* Find head of filter chain. */
2350
2351	err = __tcf_qdisc_find(net, &q, &parent, t->tcm_ifindex, false, extack);
2352	if (err)
2353		return err;
2354
2355	if (tcf_proto_check_kind(tca[TCA_KIND], name)) {
2356		NL_SET_ERR_MSG(extack, "Specified TC filter name too long");
2357		err = -EINVAL;
2358		goto errout;
2359	}
2360	/* Take the rtnl mutex if the block is shared (no qdisc found), the
2361	 * qdisc is not unlocked, the classifier type is not specified, or the
2362	 * classifier is not unlocked.
2363	 */
2364	if ((q && !(q->ops->cl_ops->flags & QDISC_CLASS_OPS_DOIT_UNLOCKED)) ||
2365	    !tcf_proto_is_unlocked(name)) {
2366		rtnl_held = true;
2367		rtnl_lock();
2368	}
2369
2370	err = __tcf_qdisc_cl_find(q, parent, &cl, t->tcm_ifindex, extack);
2371	if (err)
2372		goto errout;
2373
2374	block = __tcf_block_find(net, q, cl, t->tcm_ifindex, t->tcm_block_index,
2375				 extack);
2376	if (IS_ERR(block)) {
2377		err = PTR_ERR(block);
2378		goto errout;
2379	}
2380
2381	chain_index = tca[TCA_CHAIN] ? nla_get_u32(tca[TCA_CHAIN]) : 0;
2382	if (chain_index > TC_ACT_EXT_VAL_MASK) {
2383		NL_SET_ERR_MSG(extack, "Specified chain index exceeds upper limit");
2384		err = -EINVAL;
2385		goto errout;
2386	}
2387	chain = tcf_chain_get(block, chain_index, false);
2388	if (!chain) {
2389		NL_SET_ERR_MSG(extack, "Cannot find specified filter chain");
2390		err = -EINVAL;
2391		goto errout;
2392	}
2393
2394	mutex_lock(&chain->filter_chain_lock);
2395	tp = tcf_chain_tp_find(chain, &chain_info, protocol,
2396			       prio, false);
2397	mutex_unlock(&chain->filter_chain_lock);
2398	if (!tp || IS_ERR(tp)) {
2399		NL_SET_ERR_MSG(extack, "Filter with specified priority/protocol not found");
2400		err = tp ? PTR_ERR(tp) : -ENOENT;
2401		goto errout;
2402	} else if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], tp->ops->kind)) {
2403		NL_SET_ERR_MSG(extack, "Specified filter kind does not match existing one");
2404		err = -EINVAL;
2405		goto errout;
2406	}
2407
2408	fh = tp->ops->get(tp, t->tcm_handle);
2409
2410	if (!fh) {
2411		NL_SET_ERR_MSG(extack, "Specified filter handle not found");
2412		err = -ENOENT;
2413	} else {
2414		err = tfilter_notify(net, skb, n, tp, block, q, parent,
2415				     fh, RTM_NEWTFILTER, true, rtnl_held);
2416		if (err < 0)
2417			NL_SET_ERR_MSG(extack, "Failed to send filter notify message");
2418	}
2419
2420	tfilter_put(tp, fh);
2421errout:
2422	if (chain) {
2423		if (tp && !IS_ERR(tp))
2424			tcf_proto_put(tp, rtnl_held, NULL);
2425		tcf_chain_put(chain);
2426	}
2427	tcf_block_release(q, block, rtnl_held);
2428
2429	if (rtnl_held)
2430		rtnl_unlock();
2431
2432	return err;
2433}
2434
2435struct tcf_dump_args {
2436	struct tcf_walker w;
2437	struct sk_buff *skb;
2438	struct netlink_callback *cb;
2439	struct tcf_block *block;
2440	struct Qdisc *q;
2441	u32 parent;
2442};
2443
2444static int tcf_node_dump(struct tcf_proto *tp, void *n, struct tcf_walker *arg)
2445{
2446	struct tcf_dump_args *a = (void *)arg;
2447	struct net *net = sock_net(a->skb->sk);
2448
2449	return tcf_fill_node(net, a->skb, tp, a->block, a->q, a->parent,
2450			     n, NETLINK_CB(a->cb->skb).portid,
2451			     a->cb->nlh->nlmsg_seq, NLM_F_MULTI,
2452			     RTM_NEWTFILTER, true);
2453}
2454
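/* Dump all filters on one chain. cb->args[1] tracks the position inside the
 * current tcf_proto and cb->args[2] carries the classifier's walk cookie, so
 * a dump that overflows the skb can resume where it stopped.
 */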
2455static bool tcf_chain_dump(struct tcf_chain *chain, struct Qdisc *q, u32 parent,
2456			   struct sk_buff *skb, struct netlink_callback *cb,
2457			   long index_start, long *p_index)
2458{
2459	struct net *net = sock_net(skb->sk);
2460	struct tcf_block *block = chain->block;
2461	struct tcmsg *tcm = nlmsg_data(cb->nlh);
2462	struct tcf_proto *tp, *tp_prev;
2463	struct tcf_dump_args arg;
2464
2465	for (tp = __tcf_get_next_proto(chain, NULL);
2466	     tp;
2467	     tp_prev = tp,
2468		     tp = __tcf_get_next_proto(chain, tp),
2469		     tcf_proto_put(tp_prev, true, NULL),
2470		     (*p_index)++) {
2471		if (*p_index < index_start)
2472			continue;
2473		if (TC_H_MAJ(tcm->tcm_info) &&
2474		    TC_H_MAJ(tcm->tcm_info) != tp->prio)
2475			continue;
2476		if (TC_H_MIN(tcm->tcm_info) &&
2477		    TC_H_MIN(tcm->tcm_info) != tp->protocol)
2478			continue;
2479		if (*p_index > index_start)
2480			memset(&cb->args[1], 0,
2481			       sizeof(cb->args) - sizeof(cb->args[0]));
2482		if (cb->args[1] == 0) {
2483			if (tcf_fill_node(net, skb, tp, block, q, parent, NULL,
2484					  NETLINK_CB(cb->skb).portid,
2485					  cb->nlh->nlmsg_seq, NLM_F_MULTI,
2486					  RTM_NEWTFILTER, true) <= 0)
2487				goto errout;
2488			cb->args[1] = 1;
2489		}
2490		if (!tp->ops->walk)
2491			continue;
2492		arg.w.fn = tcf_node_dump;
2493		arg.skb = skb;
2494		arg.cb = cb;
2495		arg.block = block;
2496		arg.q = q;
2497		arg.parent = parent;
2498		arg.w.stop = 0;
2499		arg.w.skip = cb->args[1] - 1;
2500		arg.w.count = 0;
2501		arg.w.cookie = cb->args[2];
2502		tp->ops->walk(tp, &arg.w, true);
2503		cb->args[2] = arg.w.cookie;
2504		cb->args[1] = arg.w.count + 1;
2505		if (arg.w.stop)
2506			goto errout;
2507	}
2508	return true;
2509
2510errout:
2511	tcf_proto_put(tp, true, NULL);
2512	return false;
2513}
2514
2515/* called with RTNL */
2516static int tc_dump_tfilter(struct sk_buff *skb, struct netlink_callback *cb)
2517{
2518	struct tcf_chain *chain, *chain_prev;
2519	struct net *net = sock_net(skb->sk);
2520	struct nlattr *tca[TCA_MAX + 1];
2521	struct Qdisc *q = NULL;
2522	struct tcf_block *block;
2523	struct tcmsg *tcm = nlmsg_data(cb->nlh);
2524	long index_start;
2525	long index;
2526	u32 parent;
2527	int err;
2528
2529	if (nlmsg_len(cb->nlh) < sizeof(*tcm))
2530		return skb->len;
2531
2532	err = nlmsg_parse_deprecated(cb->nlh, sizeof(*tcm), tca, TCA_MAX,
2533				     NULL, cb->extack);
2534	if (err)
2535		return err;
2536
2537	if (tcm->tcm_ifindex == TCM_IFINDEX_MAGIC_BLOCK) {
2538		block = tcf_block_refcnt_get(net, tcm->tcm_block_index);
2539		if (!block)
2540			goto out;
2541		/* If we work with a block index, q is NULL and the parent value
2542		 * will never be used in the following code. The check in
2543		 * tcf_fill_node() prevents it. However, the compiler does not
2544		 * see that far, so set parent to zero to silence the warning
2545		 * about parent being uninitialized.
2546		 */
2547		parent = 0;
2548	} else {
2549		const struct Qdisc_class_ops *cops;
2550		struct net_device *dev;
2551		unsigned long cl = 0;
2552
2553		dev = __dev_get_by_index(net, tcm->tcm_ifindex);
2554		if (!dev)
2555			return skb->len;
2556
2557		parent = tcm->tcm_parent;
2558		if (!parent) {
2559			q = dev->qdisc;
2560			parent = q->handle;
2561		} else {
2562			q = qdisc_lookup(dev, TC_H_MAJ(tcm->tcm_parent));
2563		}
2564		if (!q)
2565			goto out;
2566		cops = q->ops->cl_ops;
2567		if (!cops)
2568			goto out;
2569		if (!cops->tcf_block)
2570			goto out;
2571		if (TC_H_MIN(tcm->tcm_parent)) {
2572			cl = cops->find(q, tcm->tcm_parent);
2573			if (cl == 0)
2574				goto out;
2575		}
2576		block = cops->tcf_block(q, cl, NULL);
2577		if (!block)
2578			goto out;
2579		if (tcf_block_shared(block))
2580			q = NULL;
2581	}
2582
2583	index_start = cb->args[0];
2584	index = 0;
2585
2586	for (chain = __tcf_get_next_chain(block, NULL);
2587	     chain;
2588	     chain_prev = chain,
2589		     chain = __tcf_get_next_chain(block, chain),
2590		     tcf_chain_put(chain_prev)) {
2591		if (tca[TCA_CHAIN] &&
2592		    nla_get_u32(tca[TCA_CHAIN]) != chain->index)
2593			continue;
2594		if (!tcf_chain_dump(chain, q, parent, skb, cb,
2595				    index_start, &index)) {
2596			tcf_chain_put(chain);
2597			err = -EMSGSIZE;
2598			break;
2599		}
2600	}
2601
2602	if (tcm->tcm_ifindex == TCM_IFINDEX_MAGIC_BLOCK)
2603		tcf_block_refcnt_put(block, true);
2604	cb->args[0] = index;
2605
2606out:
2607	/* If we made no progress, the error (EMSGSIZE) is real */
2608	if (skb->len == 0 && err)
2609		return err;
2610	return skb->len;
2611}
2612
2613static int tc_chain_fill_node(const struct tcf_proto_ops *tmplt_ops,
2614			      void *tmplt_priv, u32 chain_index,
2615			      struct net *net, struct sk_buff *skb,
2616			      struct tcf_block *block,
2617			      u32 portid, u32 seq, u16 flags, int event)
2618{
2619	unsigned char *b = skb_tail_pointer(skb);
2620	const struct tcf_proto_ops *ops;
2621	struct nlmsghdr *nlh;
2622	struct tcmsg *tcm;
2623	void *priv;
2624
2625	ops = tmplt_ops;
2626	priv = tmplt_priv;
2627
2628	nlh = nlmsg_put(skb, portid, seq, event, sizeof(*tcm), flags);
2629	if (!nlh)
2630		goto out_nlmsg_trim;
2631	tcm = nlmsg_data(nlh);
2632	tcm->tcm_family = AF_UNSPEC;
2633	tcm->tcm__pad1 = 0;
2634	tcm->tcm__pad2 = 0;
2635	tcm->tcm_handle = 0;
2636	if (block->q) {
2637		tcm->tcm_ifindex = qdisc_dev(block->q)->ifindex;
2638		tcm->tcm_parent = block->q->handle;
2639	} else {
2640		tcm->tcm_ifindex = TCM_IFINDEX_MAGIC_BLOCK;
2641		tcm->tcm_block_index = block->index;
2642	}
2643
2644	if (nla_put_u32(skb, TCA_CHAIN, chain_index))
2645		goto nla_put_failure;
2646
2647	if (ops) {
2648		if (nla_put_string(skb, TCA_KIND, ops->kind))
2649			goto nla_put_failure;
2650		if (ops->tmplt_dump(skb, net, priv) < 0)
2651			goto nla_put_failure;
2652	}
2653
2654	nlh->nlmsg_len = skb_tail_pointer(skb) - b;
2655	return skb->len;
2656
2657out_nlmsg_trim:
2658nla_put_failure:
2659	nlmsg_trim(skb, b);
2660	return -EMSGSIZE;
2661}
2662
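/* Build a chain event message and either unicast it to the requester or
 * broadcast it to RTNLGRP_TC listeners.
 */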
2663static int tc_chain_notify(struct tcf_chain *chain, struct sk_buff *oskb,
2664			   u32 seq, u16 flags, int event, bool unicast)
2665{
2666	u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
2667	struct tcf_block *block = chain->block;
2668	struct net *net = block->net;
2669	struct sk_buff *skb;
2670	int err = 0;
2671
2672	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
2673	if (!skb)
2674		return -ENOBUFS;
2675
2676	if (tc_chain_fill_node(chain->tmplt_ops, chain->tmplt_priv,
2677			       chain->index, net, skb, block, portid,
2678			       seq, flags, event) <= 0) {
2679		kfree_skb(skb);
2680		return -EINVAL;
2681	}
2682
2683	if (unicast)
2684		err = netlink_unicast(net->rtnl, skb, portid, MSG_DONTWAIT);
2685	else
2686		err = rtnetlink_send(skb, net, portid, RTNLGRP_TC,
2687				     flags & NLM_F_ECHO);
2688
2689	if (err > 0)
2690		err = 0;
2691	return err;
2692}
2693
2694static int tc_chain_notify_delete(const struct tcf_proto_ops *tmplt_ops,
2695				  void *tmplt_priv, u32 chain_index,
2696				  struct tcf_block *block, struct sk_buff *oskb,
2697				  u32 seq, u16 flags, bool unicast)
2698{
2699	u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
2700	struct net *net = block->net;
2701	struct sk_buff *skb;
2702
2703	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
2704	if (!skb)
2705		return -ENOBUFS;
2706
2707	if (tc_chain_fill_node(tmplt_ops, tmplt_priv, chain_index, net, skb,
2708			       block, portid, seq, flags, RTM_DELCHAIN) <= 0) {
2709		kfree_skb(skb);
2710		return -EINVAL;
2711	}
2712
2713	if (unicast)
2714		return netlink_unicast(net->rtnl, skb, portid, MSG_DONTWAIT);
2715
2716	return rtnetlink_send(skb, net, portid, RTNLGRP_TC, flags & NLM_F_ECHO);
2717}
2718
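/* Attach a template to a freshly created chain. The template pins a
 * classifier kind (and its module) to the chain; only classifiers that
 * implement all of tmplt_create/tmplt_destroy/tmplt_dump support this.
 */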
2719static int tc_chain_tmplt_add(struct tcf_chain *chain, struct net *net,
2720			      struct nlattr **tca,
2721			      struct netlink_ext_ack *extack)
2722{
2723	const struct tcf_proto_ops *ops;
2724	void *tmplt_priv;
2725
2726	/* If kind is not set, user did not specify template. */
2727	if (!tca[TCA_KIND])
2728		return 0;
2729
2730	ops = tcf_proto_lookup_ops(nla_data(tca[TCA_KIND]), true, extack);
2731	if (IS_ERR(ops))
2732		return PTR_ERR(ops);
2733	if (!ops->tmplt_create || !ops->tmplt_destroy || !ops->tmplt_dump) {
2734		NL_SET_ERR_MSG(extack, "Chain templates are not supported with specified classifier");
		/* Drop the module reference taken by tcf_proto_lookup_ops(),
		 * just as the tmplt_create() failure path below does;
		 * returning without it would leak a reference.
		 */
		module_put(ops->owner);
2735		return -EOPNOTSUPP;
2736	}
2737
2738	tmplt_priv = ops->tmplt_create(net, chain, tca, extack);
2739	if (IS_ERR(tmplt_priv)) {
2740		module_put(ops->owner);
2741		return PTR_ERR(tmplt_priv);
2742	}
2743	chain->tmplt_ops = ops;
2744	chain->tmplt_priv = tmplt_priv;
2745	return 0;
2746}
2747
2748static void tc_chain_tmplt_del(const struct tcf_proto_ops *tmplt_ops,
2749			       void *tmplt_priv)
2750{
2751	/* If template ops are not set, the chain has no template and there is nothing to do for us. */
2752	if (!tmplt_ops)
2753		return;
2754
2755	tmplt_ops->tmplt_destroy(tmplt_priv);
2756	module_put(tmplt_ops->owner);
2757}
2758
2759/* Add/delete/get a chain */
2760
2761static int tc_ctl_chain(struct sk_buff *skb, struct nlmsghdr *n,
2762			struct netlink_ext_ack *extack)
2763{
2764	struct net *net = sock_net(skb->sk);
2765	struct nlattr *tca[TCA_MAX + 1];
2766	struct tcmsg *t;
2767	u32 parent;
2768	u32 chain_index;
2769	struct Qdisc *q = NULL;
2770	struct tcf_chain *chain = NULL;
2771	struct tcf_block *block;
2772	unsigned long cl;
2773	int err;
2774
2775	if (n->nlmsg_type != RTM_GETCHAIN &&
2776	    !netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN))
2777		return -EPERM;
2778
2779replay:
2780	err = nlmsg_parse_deprecated(n, sizeof(*t), tca, TCA_MAX,
2781				     rtm_tca_policy, extack);
2782	if (err < 0)
2783		return err;
2784
2785	t = nlmsg_data(n);
2786	parent = t->tcm_parent;
2787	cl = 0;
2788
2789	block = tcf_block_find(net, &q, &parent, &cl,
2790			       t->tcm_ifindex, t->tcm_block_index, extack);
2791	if (IS_ERR(block))
2792		return PTR_ERR(block);
2793
2794	chain_index = tca[TCA_CHAIN] ? nla_get_u32(tca[TCA_CHAIN]) : 0;
2795	if (chain_index > TC_ACT_EXT_VAL_MASK) {
2796		NL_SET_ERR_MSG(extack, "Specified chain index exceeds upper limit");
2797		err = -EINVAL;
2798		goto errout_block;
2799	}
2800
2801	mutex_lock(&block->lock);
2802	chain = tcf_chain_lookup(block, chain_index);
2803	if (n->nlmsg_type == RTM_NEWCHAIN) {
2804		if (chain) {
2805			if (tcf_chain_held_by_acts_only(chain)) {
2806				/* The chain exists only because there is
2807				 * some action referencing it.
2808				 */
2809				tcf_chain_hold(chain);
2810			} else {
2811				NL_SET_ERR_MSG(extack, "Filter chain already exists");
2812				err = -EEXIST;
2813				goto errout_block_locked;
2814			}
2815		} else {
2816			if (!(n->nlmsg_flags & NLM_F_CREATE)) {
2817				NL_SET_ERR_MSG(extack, "Need both RTM_NEWCHAIN and NLM_F_CREATE to create a new chain");
2818				err = -ENOENT;
2819				goto errout_block_locked;
2820			}
2821			chain = tcf_chain_create(block, chain_index);
2822			if (!chain) {
2823				NL_SET_ERR_MSG(extack, "Failed to create filter chain");
2824				err = -ENOMEM;
2825				goto errout_block_locked;
2826			}
2827		}
2828	} else {
2829		if (!chain || tcf_chain_held_by_acts_only(chain)) {
2830			NL_SET_ERR_MSG(extack, "Cannot find specified filter chain");
2831			err = -EINVAL;
2832			goto errout_block_locked;
2833		}
2834		tcf_chain_hold(chain);
2835	}
2836
2837	if (n->nlmsg_type == RTM_NEWCHAIN) {
2838		/* Modifying a chain requires holding the parent block's lock. In
2839		 * case the chain was successfully added, take a reference to the
2840		 * chain so that an empty chain does not disappear at the end of
2841		 * this function.
2842		 */
2843		tcf_chain_hold(chain);
2844		chain->explicitly_created = true;
2845	}
2846	mutex_unlock(&block->lock);
2847
2848	switch (n->nlmsg_type) {
2849	case RTM_NEWCHAIN:
2850		err = tc_chain_tmplt_add(chain, net, tca, extack);
2851		if (err) {
2852			tcf_chain_put_explicitly_created(chain);
2853			goto errout;
2854		}
2855
2856		tc_chain_notify(chain, NULL, 0, NLM_F_CREATE | NLM_F_EXCL,
2857				RTM_NEWCHAIN, false);
2858		break;
2859	case RTM_DELCHAIN:
2860		tfilter_notify_chain(net, skb, block, q, parent, n,
2861				     chain, RTM_DELTFILTER, true);
2862		/* Flush the chain first as the user requested chain removal. */
2863		tcf_chain_flush(chain, true);
2864		/* In case the chain was successfully deleted, put a reference
2865		 * to the chain previously taken during addition.
2866		 */
2867		tcf_chain_put_explicitly_created(chain);
2868		break;
2869	case RTM_GETCHAIN:
2870		err = tc_chain_notify(chain, skb, n->nlmsg_seq,
2871				      n->nlmsg_seq, n->nlmsg_type, true);
2872		if (err < 0)
2873			NL_SET_ERR_MSG(extack, "Failed to send chain notify message");
2874		break;
2875	default:
2876		err = -EOPNOTSUPP;
2877		NL_SET_ERR_MSG(extack, "Unsupported message type");
2878		goto errout;
2879	}
2880
2881errout:
2882	tcf_chain_put(chain);
2883errout_block:
2884	tcf_block_release(q, block, true);
2885	if (err == -EAGAIN)
2886		/* Replay the request. */
2887		goto replay;
2888	return err;
2889
2890errout_block_locked:
2891	mutex_unlock(&block->lock);
2892	goto errout_block;
2893}
2894
2895/* called with RTNL */
2896static int tc_dump_chain(struct sk_buff *skb, struct netlink_callback *cb)
2897{
2898	struct net *net = sock_net(skb->sk);
2899	struct nlattr *tca[TCA_MAX + 1];
2900	struct Qdisc *q = NULL;
2901	struct tcf_block *block;
2902	struct tcmsg *tcm = nlmsg_data(cb->nlh);
2903	struct tcf_chain *chain;
2904	long index_start;
2905	long index;
2906	u32 parent;
2907	int err;
2908
2909	if (nlmsg_len(cb->nlh) < sizeof(*tcm))
2910		return skb->len;
2911
2912	err = nlmsg_parse_deprecated(cb->nlh, sizeof(*tcm), tca, TCA_MAX,
2913				     rtm_tca_policy, cb->extack);
2914	if (err)
2915		return err;
2916
2917	if (tcm->tcm_ifindex == TCM_IFINDEX_MAGIC_BLOCK) {
2918		block = tcf_block_refcnt_get(net, tcm->tcm_block_index);
2919		if (!block)
2920			goto out;
2921		/* If we work with a block index, q is NULL and the parent value
2922		 * will never be used in the following code. The check in
2923		 * tcf_fill_node() prevents it. However, the compiler does not
2924		 * see that far, so set parent to zero to silence the warning
2925		 * about parent being uninitialized.
2926		 */
2927		parent = 0;
2928	} else {
2929		const struct Qdisc_class_ops *cops;
2930		struct net_device *dev;
2931		unsigned long cl = 0;
2932
2933		dev = __dev_get_by_index(net, tcm->tcm_ifindex);
2934		if (!dev)
2935			return skb->len;
2936
2937		parent = tcm->tcm_parent;
2938		if (!parent) {
2939			q = dev->qdisc;
2940			parent = q->handle;
2941		} else {
2942			q = qdisc_lookup(dev, TC_H_MAJ(tcm->tcm_parent));
2943		}
2944		if (!q)
2945			goto out;
2946		cops = q->ops->cl_ops;
2947		if (!cops)
2948			goto out;
2949		if (!cops->tcf_block)
2950			goto out;
2951		if (TC_H_MIN(tcm->tcm_parent)) {
2952			cl = cops->find(q, tcm->tcm_parent);
2953			if (cl == 0)
2954				goto out;
2955		}
2956		block = cops->tcf_block(q, cl, NULL);
2957		if (!block)
2958			goto out;
2959		if (tcf_block_shared(block))
2960			q = NULL;
2961	}
2962
2963	index_start = cb->args[0];
2964	index = 0;
2965
2966	mutex_lock(&block->lock);
2967	list_for_each_entry(chain, &block->chain_list, list) {
2968		if ((tca[TCA_CHAIN] &&
2969		     nla_get_u32(tca[TCA_CHAIN]) != chain->index))
2970			continue;
2971		if (index < index_start) {
2972			index++;
2973			continue;
2974		}
2975		if (tcf_chain_held_by_acts_only(chain))
2976			continue;
2977		err = tc_chain_fill_node(chain->tmplt_ops, chain->tmplt_priv,
2978					 chain->index, net, skb, block,
2979					 NETLINK_CB(cb->skb).portid,
2980					 cb->nlh->nlmsg_seq, NLM_F_MULTI,
2981					 RTM_NEWCHAIN);
2982		if (err <= 0)
2983			break;
2984		index++;
2985	}
2986	mutex_unlock(&block->lock);
2987
2988	if (tcm->tcm_ifindex == TCM_IFINDEX_MAGIC_BLOCK)
2989		tcf_block_refcnt_put(block, true);
2990	cb->args[0] = index;
2991
2992out:
2993	/* If we made no progress, the error (EMSGSIZE) is real */
2994	if (skb->len == 0 && err)
2995		return err;
2996	return skb->len;
2997}
2998
2999void tcf_exts_destroy(struct tcf_exts *exts)
3000{
3001#ifdef CONFIG_NET_CLS_ACT
3002	if (exts->actions) {
3003		tcf_action_destroy(exts->actions, TCA_ACT_UNBIND);
3004		kfree(exts->actions);
3005	}
3006	exts->nr_actions = 0;
3007#endif
3008}
3009EXPORT_SYMBOL(tcf_exts_destroy);
3010
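/* Parse a filter's action attributes into *exts. The legacy "police"
 * attribute carries a single action in the old binary layout
 * (TCA_OLD_COMPAT), while the generic action attribute accepts a full
 * list of actions.
 */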
3011int tcf_exts_validate(struct net *net, struct tcf_proto *tp, struct nlattr **tb,
3012		      struct nlattr *rate_tlv, struct tcf_exts *exts, bool ovr,
3013		      bool rtnl_held, struct netlink_ext_ack *extack)
3014{
3015#ifdef CONFIG_NET_CLS_ACT
3016	{
3017		struct tc_action *act;
3018		size_t attr_size = 0;
3019
3020		if (exts->police && tb[exts->police]) {
3021			act = tcf_action_init_1(net, tp, tb[exts->police],
3022						rate_tlv, "police", ovr,
3023						TCA_ACT_BIND, rtnl_held,
3024						extack);
3025			if (IS_ERR(act))
3026				return PTR_ERR(act);
3027
3028			act->type = exts->type = TCA_OLD_COMPAT;
3029			exts->actions[0] = act;
3030			exts->nr_actions = 1;
3031		} else if (exts->action && tb[exts->action]) {
3032			int err;
3033
3034			err = tcf_action_init(net, tp, tb[exts->action],
3035					      rate_tlv, NULL, ovr, TCA_ACT_BIND,
3036					      exts->actions, &attr_size,
3037					      rtnl_held, extack);
3038			if (err < 0)
3039				return err;
3040			exts->nr_actions = err;
3041		}
3042	}
3043#else
3044	if ((exts->action && tb[exts->action]) ||
3045	    (exts->police && tb[exts->police])) {
3046		NL_SET_ERR_MSG(extack, "Classifier actions are not supported per compile options (CONFIG_NET_CLS_ACT)");
3047		return -EOPNOTSUPP;
3048	}
3049#endif
3050
3051	return 0;
3052}
3053EXPORT_SYMBOL(tcf_exts_validate);
3054
3055void tcf_exts_change(struct tcf_exts *dst, struct tcf_exts *src)
3056{
3057#ifdef CONFIG_NET_CLS_ACT
3058	struct tcf_exts old = *dst;
3059
3060	*dst = *src;
3061	tcf_exts_destroy(&old);
3062#endif
3063}
3064EXPORT_SYMBOL(tcf_exts_change);
3065
3066#ifdef CONFIG_NET_CLS_ACT
3067static struct tc_action *tcf_exts_first_act(struct tcf_exts *exts)
3068{
3069	if (exts->nr_actions == 0)
3070		return NULL;
3071	else
3072		return exts->actions[0];
3073}
3074#endif
3075
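/* Dump the actions back to userspace: a nested action list for the modern
 * layout, or the flat single-action format when the exts were created via
 * the legacy police attribute.
 */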
3076int tcf_exts_dump(struct sk_buff *skb, struct tcf_exts *exts)
3077{
3078#ifdef CONFIG_NET_CLS_ACT
3079	struct nlattr *nest;
3080
3081	if (exts->action && tcf_exts_has_actions(exts)) {
3082		/*
3083		 * Again for backward-compatible mode - we want to work with
3084		 * both old and new modes of entering tc data even if iproute2
3085		 * was newer - jhs
3086		 */
3087		if (exts->type != TCA_OLD_COMPAT) {
3088			nest = nla_nest_start_noflag(skb, exts->action);
3089			if (nest == NULL)
3090				goto nla_put_failure;
3091
3092			if (tcf_action_dump(skb, exts->actions, 0, 0) < 0)
3093				goto nla_put_failure;
3094			nla_nest_end(skb, nest);
3095		} else if (exts->police) {
3096			struct tc_action *act = tcf_exts_first_act(exts);
3097			nest = nla_nest_start_noflag(skb, exts->police);
3098			if (nest == NULL || !act)
3099				goto nla_put_failure;
3100			if (tcf_action_dump_old(skb, act, 0, 0) < 0)
3101				goto nla_put_failure;
3102			nla_nest_end(skb, nest);
3103		}
3104	}
3105	return 0;
3106
3107nla_put_failure:
3108	nla_nest_cancel(skb, nest);
3109	return -1;
3110#else
3111	return 0;
3112#endif
3113}
3114EXPORT_SYMBOL(tcf_exts_dump);
3115
3116
3117int tcf_exts_dump_stats(struct sk_buff *skb, struct tcf_exts *exts)
3118{
3119#ifdef CONFIG_NET_CLS_ACT
3120	struct tc_action *a = tcf_exts_first_act(exts);
3121	if (a != NULL && tcf_action_copy_stats(skb, a, 1) < 0)
3122		return -1;
3123#endif
3124	return 0;
3125}
3126EXPORT_SYMBOL(tcf_exts_dump_stats);
3127
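/* offloadcnt counts the filters on this block that are present in hardware;
 * the TCA_CLS_FLAGS_IN_HW bit in a filter's flags makes sure each filter is
 * counted at most once.
 */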
3128static void tcf_block_offload_inc(struct tcf_block *block, u32 *flags)
3129{
3130	if (*flags & TCA_CLS_FLAGS_IN_HW)
3131		return;
3132	*flags |= TCA_CLS_FLAGS_IN_HW;
3133	atomic_inc(&block->offloadcnt);
3134}
3135
3136static void tcf_block_offload_dec(struct tcf_block *block, u32 *flags)
3137{
3138	if (!(*flags & TCA_CLS_FLAGS_IN_HW))
3139		return;
3140	*flags &= ~TCA_CLS_FLAGS_IN_HW;
3141	atomic_dec(&block->offloadcnt);
3142}
3143
3144static void tc_cls_offload_cnt_update(struct tcf_block *block,
3145				      struct tcf_proto *tp, u32 *cnt,
3146				      u32 *flags, u32 diff, bool add)
3147{
3148	lockdep_assert_held(&block->cb_lock);
3149
3150	spin_lock(&tp->lock);
3151	if (add) {
3152		if (!*cnt)
3153			tcf_block_offload_inc(block, flags);
3154		*cnt += diff;
3155	} else {
3156		*cnt -= diff;
3157		if (!*cnt)
3158			tcf_block_offload_dec(block, flags);
3159	}
3160	spin_unlock(&tp->lock);
3161}
3162
3163static void
3164tc_cls_offload_cnt_reset(struct tcf_block *block, struct tcf_proto *tp,
3165			 u32 *cnt, u32 *flags)
3166{
3167	lockdep_assert_held(&block->cb_lock);
3168
3169	spin_lock(&tp->lock);
3170	tcf_block_offload_dec(block, flags);
3171	*cnt = 0;
3172	spin_unlock(&tp->lock);
3173}
3174
3175static int
3176__tc_setup_cb_call(struct tcf_block *block, enum tc_setup_type type,
3177		   void *type_data, bool err_stop)
3178{
3179	struct flow_block_cb *block_cb;
3180	int ok_count = 0;
3181	int err;
3182
3183	list_for_each_entry(block_cb, &block->flow_block.cb_list, list) {
3184		err = block_cb->cb(type, type_data, block_cb->cb_priv);
3185		if (err) {
3186			if (err_stop)
3187				return err;
3188		} else {
3189			ok_count++;
3190		}
3191	}
3192	return ok_count;
3193}
3194
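/* Invoke the block's offload callbacks, taking rtnl first when any bound
 * device registered its callback under rtnl (lockeddevcnt), so the locks
 * are acquired in the same order as at bind time.
 */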
3195int tc_setup_cb_call(struct tcf_block *block, enum tc_setup_type type,
3196		     void *type_data, bool err_stop, bool rtnl_held)
3197{
3198	bool take_rtnl = READ_ONCE(block->lockeddevcnt) && !rtnl_held;
3199	int ok_count;
3200
3201retry:
3202	if (take_rtnl)
3203		rtnl_lock();
3204	down_read(&block->cb_lock);
3205	/* Need to obtain the rtnl lock if the block is bound to devs that
3206	 * require it. In the block bind code, cb_lock is obtained while
3207	 * holding rtnl, so we must obtain the locks in the same order here.
3208	 */
3209	if (!rtnl_held && !take_rtnl && block->lockeddevcnt) {
3210		up_read(&block->cb_lock);
3211		take_rtnl = true;
3212		goto retry;
3213	}
3214
3215	ok_count = __tc_setup_cb_call(block, type, type_data, err_stop);
3216
3217	up_read(&block->cb_lock);
3218	if (take_rtnl)
3219		rtnl_unlock();
3220	return ok_count;
3221}
3222EXPORT_SYMBOL(tc_setup_cb_call);
3223
3224/* Non-destructive filter add. If a filter that wasn't already in hardware
3225 * is successfully offloaded, increment the block's offload counter. On
3226 * failure, a previously offloaded filter is considered to be intact and the
3227 * offload counter is not decremented.
3228 */
3229
3230int tc_setup_cb_add(struct tcf_block *block, struct tcf_proto *tp,
3231		    enum tc_setup_type type, void *type_data, bool err_stop,
3232		    u32 *flags, unsigned int *in_hw_count, bool rtnl_held)
3233{
3234	bool take_rtnl = READ_ONCE(block->lockeddevcnt) && !rtnl_held;
3235	int ok_count;
3236
3237retry:
3238	if (take_rtnl)
3239		rtnl_lock();
3240	down_read(&block->cb_lock);
3241	/* Need to obtain the rtnl lock if the block is bound to devs that
3242	 * require it. In the block bind code, cb_lock is obtained while
3243	 * holding rtnl, so we must obtain the locks in the same order here.
3244	 */
3245	if (!rtnl_held && !take_rtnl && block->lockeddevcnt) {
3246		up_read(&block->cb_lock);
3247		take_rtnl = true;
3248		goto retry;
3249	}
3250
3251	/* Make sure all netdevs sharing this block are offload-capable. */
3252	if (block->nooffloaddevcnt && err_stop) {
3253		ok_count = -EOPNOTSUPP;
3254		goto err_unlock;
3255	}
3256
3257	ok_count = __tc_setup_cb_call(block, type, type_data, err_stop);
3258	if (ok_count < 0)
3259		goto err_unlock;
3260
3261	if (tp->ops->hw_add)
3262		tp->ops->hw_add(tp, type_data);
3263	if (ok_count > 0)
3264		tc_cls_offload_cnt_update(block, tp, in_hw_count, flags,
3265					  ok_count, true);
3266err_unlock:
3267	up_read(&block->cb_lock);
3268	if (take_rtnl)
3269		rtnl_unlock();
3270	return ok_count < 0 ? ok_count : 0;
3271}
3272EXPORT_SYMBOL(tc_setup_cb_add);
3273
3274/* Destructive filter replace. If a filter that wasn't already in hardware
3275 * is successfully offloaded, increment the block's offload counter. On
3276 * failure, a previously offloaded filter is considered to be destroyed and
3277 * the offload counter is decremented.
3278 */
3279
3280int tc_setup_cb_replace(struct tcf_block *block, struct tcf_proto *tp,
3281			enum tc_setup_type type, void *type_data, bool err_stop,
3282			u32 *old_flags, unsigned int *old_in_hw_count,
3283			u32 *new_flags, unsigned int *new_in_hw_count,
3284			bool rtnl_held)
3285{
3286	bool take_rtnl = READ_ONCE(block->lockeddevcnt) && !rtnl_held;
3287	int ok_count;
3288
3289retry:
3290	if (take_rtnl)
3291		rtnl_lock();
3292	down_read(&block->cb_lock);
3293	/* Need to obtain the rtnl lock if the block is bound to devs that
3294	 * require it. In the block bind code, cb_lock is obtained while
3295	 * holding rtnl, so we must obtain the locks in the same order here.
3296	 */
3297	if (!rtnl_held && !take_rtnl && block->lockeddevcnt) {
3298		up_read(&block->cb_lock);
3299		take_rtnl = true;
3300		goto retry;
3301	}
3302
3303	/* Make sure all netdevs sharing this block are offload-capable. */
3304	if (block->nooffloaddevcnt && err_stop) {
3305		ok_count = -EOPNOTSUPP;
3306		goto err_unlock;
3307	}
3308
3309	tc_cls_offload_cnt_reset(block, tp, old_in_hw_count, old_flags);
3310	if (tp->ops->hw_del)
3311		tp->ops->hw_del(tp, type_data);
3312
3313	ok_count = __tc_setup_cb_call(block, type, type_data, err_stop);
3314	if (ok_count < 0)
3315		goto err_unlock;
3316
3317	if (tp->ops->hw_add)
3318		tp->ops->hw_add(tp, type_data);
3319	if (ok_count > 0)
3320		tc_cls_offload_cnt_update(block, tp, new_in_hw_count,
3321					  new_flags, ok_count, true);
3322err_unlock:
3323	up_read(&block->cb_lock);
3324	if (take_rtnl)
3325		rtnl_unlock();
3326	return ok_count < 0 ? ok_count : 0;
3327}
3328EXPORT_SYMBOL(tc_setup_cb_replace);
3329
3330/* Destroy the filter and decrement the block's offload counter if the
3331 * filter was previously offloaded.
3332 */
3333
3334int tc_setup_cb_destroy(struct tcf_block *block, struct tcf_proto *tp,
3335			enum tc_setup_type type, void *type_data, bool err_stop,
3336			u32 *flags, unsigned int *in_hw_count, bool rtnl_held)
3337{
3338	bool take_rtnl = READ_ONCE(block->lockeddevcnt) && !rtnl_held;
3339	int ok_count;
3340
3341retry:
3342	if (take_rtnl)
3343		rtnl_lock();
3344	down_read(&block->cb_lock);
3345	/* Need to obtain the rtnl lock if the block is bound to devs that
3346	 * require it. In the block bind code, cb_lock is obtained while
3347	 * holding rtnl, so we must obtain the locks in the same order here.
3348	 */
3349	if (!rtnl_held && !take_rtnl && block->lockeddevcnt) {
3350		up_read(&block->cb_lock);
3351		take_rtnl = true;
3352		goto retry;
3353	}
3354
3355	ok_count = __tc_setup_cb_call(block, type, type_data, err_stop);
3356
3357	tc_cls_offload_cnt_reset(block, tp, in_hw_count, flags);
3358	if (tp->ops->hw_del)
3359		tp->ops->hw_del(tp, type_data);
3360
3361	up_read(&block->cb_lock);
3362	if (take_rtnl)
3363		rtnl_unlock();
3364	return ok_count < 0 ? ok_count : 0;
3365}
3366EXPORT_SYMBOL(tc_setup_cb_destroy);
3367
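/* Replay one filter to a single callback when it is bound to or unbound
 * from a live block. For skip_sw filters a failed add is fatal, since the
 * rule would then exist nowhere; otherwise a failure only means this cb
 * holds no copy of the rule.
 */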
3368int tc_setup_cb_reoffload(struct tcf_block *block, struct tcf_proto *tp,
3369			  bool add, flow_setup_cb_t *cb,
3370			  enum tc_setup_type type, void *type_data,
3371			  void *cb_priv, u32 *flags, unsigned int *in_hw_count)
3372{
3373	int err = cb(type, type_data, cb_priv);
3374
3375	if (err) {
3376		if (add && tc_skip_sw(*flags))
3377			return err;
3378	} else {
3379		tc_cls_offload_cnt_update(block, tp, in_hw_count, flags, 1,
3380					  add);
3381	}
3382
3383	return 0;
3384}
3385EXPORT_SYMBOL(tc_setup_cb_reoffload);
3386
3387void tc_cleanup_flow_action(struct flow_action *flow_action)
3388{
3389	struct flow_action_entry *entry;
3390	int i;
3391
3392	flow_action_for_each(i, entry, flow_action)
3393		if (entry->destructor)
3394			entry->destructor(entry->destructor_priv);
3395}
3396EXPORT_SYMBOL(tc_cleanup_flow_action);
3397
3398static void tcf_mirred_get_dev(struct flow_action_entry *entry,
3399			       const struct tc_action *act)
3400{
3401#ifdef CONFIG_NET_CLS_ACT
3402	entry->dev = act->ops->get_dev(act, &entry->destructor);
3403	if (!entry->dev)
3404		return;
3405	entry->destructor_priv = entry->dev;
3406#endif
3407}
3408
3409static void tcf_tunnel_encap_put_tunnel(void *priv)
3410{
3411	struct ip_tunnel_info *tunnel = priv;
3412
3413	kfree(tunnel);
3414}
3415
3416static int tcf_tunnel_encap_get_tunnel(struct flow_action_entry *entry,
3417				       const struct tc_action *act)
3418{
3419	entry->tunnel = tcf_tunnel_info_copy(act);
3420	if (!entry->tunnel)
3421		return -ENOMEM;
3422	entry->destructor = tcf_tunnel_encap_put_tunnel;
3423	entry->destructor_priv = entry->tunnel;
3424	return 0;
3425}
3426
3427static void tcf_sample_get_group(struct flow_action_entry *entry,
3428				 const struct tc_action *act)
3429{
3430#ifdef CONFIG_NET_CLS_ACT
3431	entry->sample.psample_group =
3432		act->ops->get_psample_group(act, &entry->destructor);
3433	entry->destructor_priv = entry->sample.psample_group;
3434#endif
3435}
3436
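/* Translate the tc actions attached to a filter into the flow_action
 * representation consumed by offload drivers. Every pedit key becomes its
 * own entry, matching the per-key counting in tcf_exts_num_actions() below.
 * rtnl is taken unless the caller already holds it, since the action
 * accessors read state that may otherwise change under us.
 */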
3437int tc_setup_flow_action(struct flow_action *flow_action,
3438			 const struct tcf_exts *exts, bool rtnl_held)
3439{
3440	const struct tc_action *act;
3441	int i, j, k, err = 0;
3442
3443	if (!exts)
3444		return 0;
3445
3446	if (!rtnl_held)
3447		rtnl_lock();
3448
3449	j = 0;
3450	tcf_exts_for_each_action(i, act, exts) {
3451		struct flow_action_entry *entry;
3452
3453		entry = &flow_action->entries[j];
3454		if (is_tcf_gact_ok(act)) {
3455			entry->id = FLOW_ACTION_ACCEPT;
3456		} else if (is_tcf_gact_shot(act)) {
3457			entry->id = FLOW_ACTION_DROP;
3458		} else if (is_tcf_gact_trap(act)) {
3459			entry->id = FLOW_ACTION_TRAP;
3460		} else if (is_tcf_gact_goto_chain(act)) {
3461			entry->id = FLOW_ACTION_GOTO;
3462			entry->chain_index = tcf_gact_goto_chain_index(act);
3463		} else if (is_tcf_mirred_egress_redirect(act)) {
3464			entry->id = FLOW_ACTION_REDIRECT;
3465			tcf_mirred_get_dev(entry, act);
3466		} else if (is_tcf_mirred_egress_mirror(act)) {
3467			entry->id = FLOW_ACTION_MIRRED;
3468			tcf_mirred_get_dev(entry, act);
3469		} else if (is_tcf_mirred_ingress_redirect(act)) {
3470			entry->id = FLOW_ACTION_REDIRECT_INGRESS;
3471			tcf_mirred_get_dev(entry, act);
3472		} else if (is_tcf_mirred_ingress_mirror(act)) {
3473			entry->id = FLOW_ACTION_MIRRED_INGRESS;
3474			tcf_mirred_get_dev(entry, act);
3475		} else if (is_tcf_vlan(act)) {
3476			switch (tcf_vlan_action(act)) {
3477			case TCA_VLAN_ACT_PUSH:
3478				entry->id = FLOW_ACTION_VLAN_PUSH;
3479				entry->vlan.vid = tcf_vlan_push_vid(act);
3480				entry->vlan.proto = tcf_vlan_push_proto(act);
3481				entry->vlan.prio = tcf_vlan_push_prio(act);
3482				break;
3483			case TCA_VLAN_ACT_POP:
3484				entry->id = FLOW_ACTION_VLAN_POP;
3485				break;
3486			case TCA_VLAN_ACT_MODIFY:
3487				entry->id = FLOW_ACTION_VLAN_MANGLE;
3488				entry->vlan.vid = tcf_vlan_push_vid(act);
3489				entry->vlan.proto = tcf_vlan_push_proto(act);
3490				entry->vlan.prio = tcf_vlan_push_prio(act);
3491				break;
3492			default:
3493				err = -EOPNOTSUPP;
3494				goto err_out;
3495			}
3496		} else if (is_tcf_tunnel_set(act)) {
3497			entry->id = FLOW_ACTION_TUNNEL_ENCAP;
3498			err = tcf_tunnel_encap_get_tunnel(entry, act);
3499			if (err)
3500				goto err_out;
3501		} else if (is_tcf_tunnel_release(act)) {
3502			entry->id = FLOW_ACTION_TUNNEL_DECAP;
3503		} else if (is_tcf_pedit(act)) {
3504			for (k = 0; k < tcf_pedit_nkeys(act); k++) {
3505				switch (tcf_pedit_cmd(act, k)) {
3506				case TCA_PEDIT_KEY_EX_CMD_SET:
3507					entry->id = FLOW_ACTION_MANGLE;
3508					break;
3509				case TCA_PEDIT_KEY_EX_CMD_ADD:
3510					entry->id = FLOW_ACTION_ADD;
3511					break;
3512				default:
3513					err = -EOPNOTSUPP;
3514					goto err_out;
3515				}
3516				entry->mangle.htype = tcf_pedit_htype(act, k);
3517				entry->mangle.mask = tcf_pedit_mask(act, k);
3518				entry->mangle.val = tcf_pedit_val(act, k);
3519				entry->mangle.offset = tcf_pedit_offset(act, k);
3520				entry = &flow_action->entries[++j];
3521			}
3522		} else if (is_tcf_csum(act)) {
3523			entry->id = FLOW_ACTION_CSUM;
3524			entry->csum_flags = tcf_csum_update_flags(act);
3525		} else if (is_tcf_skbedit_mark(act)) {
3526			entry->id = FLOW_ACTION_MARK;
3527			entry->mark = tcf_skbedit_mark(act);
3528		} else if (is_tcf_sample(act)) {
3529			entry->id = FLOW_ACTION_SAMPLE;
3530			entry->sample.trunc_size = tcf_sample_trunc_size(act);
3531			entry->sample.truncate = tcf_sample_truncate(act);
3532			entry->sample.rate = tcf_sample_rate(act);
3533			tcf_sample_get_group(entry, act);
3534		} else if (is_tcf_police(act)) {
3535			entry->id = FLOW_ACTION_POLICE;
3536			entry->police.burst = tcf_police_tcfp_burst(act);
3537			entry->police.rate_bytes_ps =
3538				tcf_police_rate_bytes_ps(act);
3539		} else if (is_tcf_ct(act)) {
3540			entry->id = FLOW_ACTION_CT;
3541			entry->ct.action = tcf_ct_action(act);
3542			entry->ct.zone = tcf_ct_zone(act);
3543		} else if (is_tcf_mpls(act)) {
3544			switch (tcf_mpls_action(act)) {
3545			case TCA_MPLS_ACT_PUSH:
3546				entry->id = FLOW_ACTION_MPLS_PUSH;
3547				entry->mpls_push.proto = tcf_mpls_proto(act);
3548				entry->mpls_push.label = tcf_mpls_label(act);
3549				entry->mpls_push.tc = tcf_mpls_tc(act);
3550				entry->mpls_push.bos = tcf_mpls_bos(act);
3551				entry->mpls_push.ttl = tcf_mpls_ttl(act);
3552				break;
3553			case TCA_MPLS_ACT_POP:
3554				entry->id = FLOW_ACTION_MPLS_POP;
3555				entry->mpls_pop.proto = tcf_mpls_proto(act);
3556				break;
3557			case TCA_MPLS_ACT_MODIFY:
3558				entry->id = FLOW_ACTION_MPLS_MANGLE;
3559				entry->mpls_mangle.label = tcf_mpls_label(act);
3560				entry->mpls_mangle.tc = tcf_mpls_tc(act);
3561				entry->mpls_mangle.bos = tcf_mpls_bos(act);
3562				entry->mpls_mangle.ttl = tcf_mpls_ttl(act);
3563				break;
3564			default:
				/* Set err like the other unsupported-action
				 * branches; falling through with err == 0
				 * would report success for an action that
				 * was never translated.
				 */
				err = -EOPNOTSUPP;
3565				goto err_out;
3566			}
3567		} else if (is_tcf_skbedit_ptype(act)) {
3568			entry->id = FLOW_ACTION_PTYPE;
3569			entry->ptype = tcf_skbedit_ptype(act);
3570		} else {
3571			err = -EOPNOTSUPP;
3572			goto err_out;
3573		}
3574
3575		if (!is_tcf_pedit(act))
3576			j++;
3577	}
3578
3579err_out:
3580	if (!rtnl_held)
3581		rtnl_unlock();
3582
3583	if (err)
3584		tc_cleanup_flow_action(flow_action);
3585
3586	return err;
3587}
3588EXPORT_SYMBOL(tc_setup_flow_action);
3589
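/* Count how many flow_action entries the exts will expand to; pedit
 * contributes one entry per key, mirroring tc_setup_flow_action() above.
 */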
3590unsigned int tcf_exts_num_actions(struct tcf_exts *exts)
3591{
3592	unsigned int num_acts = 0;
3593	struct tc_action *act;
3594	int i;
3595
3596	tcf_exts_for_each_action(i, act, exts) {
3597		if (is_tcf_pedit(act))
3598			num_acts += tcf_pedit_nkeys(act);
3599		else
3600			num_acts++;
3601	}
3602	return num_acts;
3603}
3604EXPORT_SYMBOL(tcf_exts_num_actions);
3605
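/* Per-netns state: an IDR, protected by idr_lock, backing the block-index
 * lookups (tcf_block_refcnt_get()) used by the shared-block code above.
 */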
3606static __net_init int tcf_net_init(struct net *net)
3607{
3608	struct tcf_net *tn = net_generic(net, tcf_net_id);
3609
3610	spin_lock_init(&tn->idr_lock);
3611	idr_init(&tn->idr);
3612	return 0;
3613}
3614
3615static void __net_exit tcf_net_exit(struct net *net)
3616{
3617	struct tcf_net *tn = net_generic(net, tcf_net_id);
3618
3619	idr_destroy(&tn->idr);
3620}
3621
3622static struct pernet_operations tcf_net_ops = {
3623	.init = tcf_net_init,
3624	.exit = tcf_net_exit,
3625	.id   = &tcf_net_id,
3626	.size = sizeof(struct tcf_net),
3627};
3628
3629static struct flow_indr_block_ing_entry block_ing_entry = {
3630	.cb = tc_indr_block_get_and_ing_cmd,
3631	.list = LIST_HEAD_INIT(block_ing_entry.list),
3632};
3633
3634static int __init tc_filter_init(void)
3635{
3636	int err;
3637
3638	tc_filter_wq = alloc_ordered_workqueue("tc_filter_workqueue", 0);
3639	if (!tc_filter_wq)
3640		return -ENOMEM;
3641
3642	err = register_pernet_subsys(&tcf_net_ops);
3643	if (err)
3644		goto err_register_pernet_subsys;
3645
3646	flow_indr_add_block_ing_cb(&block_ing_entry);
3647
3648	rtnl_register(PF_UNSPEC, RTM_NEWTFILTER, tc_new_tfilter, NULL,
3649		      RTNL_FLAG_DOIT_UNLOCKED);
3650	rtnl_register(PF_UNSPEC, RTM_DELTFILTER, tc_del_tfilter, NULL,
3651		      RTNL_FLAG_DOIT_UNLOCKED);
3652	rtnl_register(PF_UNSPEC, RTM_GETTFILTER, tc_get_tfilter,
3653		      tc_dump_tfilter, RTNL_FLAG_DOIT_UNLOCKED);
3654	rtnl_register(PF_UNSPEC, RTM_NEWCHAIN, tc_ctl_chain, NULL, 0);
3655	rtnl_register(PF_UNSPEC, RTM_DELCHAIN, tc_ctl_chain, NULL, 0);
3656	rtnl_register(PF_UNSPEC, RTM_GETCHAIN, tc_ctl_chain,
3657		      tc_dump_chain, 0);
3658
3659	return 0;
3660
3661err_register_pernet_subsys:
3662	destroy_workqueue(tc_filter_wq);
3663	return err;
3664}
3665
3666subsys_initcall(tc_filter_init);
v6.13.7
   1// SPDX-License-Identifier: GPL-2.0-or-later
   2/*
   3 * net/sched/cls_api.c	Packet classifier API.
   4 *
   5 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
   6 *
   7 * Changes:
   8 *
   9 * Eduardo J. Blanco <ejbs@netlabs.com.uy> :990222: kmod support
  10 */
  11
  12#include <linux/module.h>
  13#include <linux/types.h>
  14#include <linux/kernel.h>
  15#include <linux/string.h>
  16#include <linux/errno.h>
  17#include <linux/err.h>
  18#include <linux/skbuff.h>
  19#include <linux/init.h>
  20#include <linux/kmod.h>
  21#include <linux/slab.h>
  22#include <linux/idr.h>
 
  23#include <linux/jhash.h>
  24#include <linux/rculist.h>
  25#include <linux/rhashtable.h>
  26#include <net/net_namespace.h>
  27#include <net/sock.h>
  28#include <net/netlink.h>
  29#include <net/pkt_sched.h>
  30#include <net/pkt_cls.h>
  31#include <net/tc_act/tc_pedit.h>
  32#include <net/tc_act/tc_mirred.h>
  33#include <net/tc_act/tc_vlan.h>
  34#include <net/tc_act/tc_tunnel_key.h>
  35#include <net/tc_act/tc_csum.h>
  36#include <net/tc_act/tc_gact.h>
  37#include <net/tc_act/tc_police.h>
  38#include <net/tc_act/tc_sample.h>
  39#include <net/tc_act/tc_skbedit.h>
  40#include <net/tc_act/tc_ct.h>
  41#include <net/tc_act/tc_mpls.h>
  42#include <net/tc_act/tc_gate.h>
  43#include <net/flow_offload.h>
  44#include <net/tc_wrapper.h>
 
  45
  46/* The list of all installed classifier types */
  47static LIST_HEAD(tcf_proto_base);
  48
  49/* Protects list of registered TC modules. It is pure SMP lock. */
  50static DEFINE_RWLOCK(cls_mod_lock);
  51
  52static struct xarray tcf_exts_miss_cookies_xa;
  53struct tcf_exts_miss_cookie_node {
  54	const struct tcf_chain *chain;
  55	const struct tcf_proto *tp;
  56	const struct tcf_exts *exts;
  57	u32 chain_index;
  58	u32 tp_prio;
  59	u32 handle;
  60	u32 miss_cookie_base;
  61	struct rcu_head rcu;
  62};
  63
  64/* Each tc action entry cookie will be comprised of 32bit miss_cookie_base +
  65 * action index in the exts tc actions array.
  66 */
  67union tcf_exts_miss_cookie {
  68	struct {
  69		u32 miss_cookie_base;
  70		u32 act_index;
  71	};
  72	u64 miss_cookie;
  73};
  74
  75#if IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
  76static int
  77tcf_exts_miss_cookie_base_alloc(struct tcf_exts *exts, struct tcf_proto *tp,
  78				u32 handle)
  79{
  80	struct tcf_exts_miss_cookie_node *n;
  81	static u32 next;
  82	int err;
  83
  84	if (WARN_ON(!handle || !tp->ops->get_exts))
  85		return -EINVAL;
  86
  87	n = kzalloc(sizeof(*n), GFP_KERNEL);
  88	if (!n)
  89		return -ENOMEM;
  90
  91	n->chain_index = tp->chain->index;
  92	n->chain = tp->chain;
  93	n->tp_prio = tp->prio;
  94	n->tp = tp;
  95	n->exts = exts;
  96	n->handle = handle;
  97
  98	err = xa_alloc_cyclic(&tcf_exts_miss_cookies_xa, &n->miss_cookie_base,
  99			      n, xa_limit_32b, &next, GFP_KERNEL);
 100	if (err < 0)
 101		goto err_xa_alloc;
 102
 103	exts->miss_cookie_node = n;
 104	return 0;
 105
 106err_xa_alloc:
 107	kfree(n);
 108	return err;
 109}
 110
 111static void tcf_exts_miss_cookie_base_destroy(struct tcf_exts *exts)
 112{
 113	struct tcf_exts_miss_cookie_node *n;
 114
 115	if (!exts->miss_cookie_node)
 116		return;
 117
 118	n = exts->miss_cookie_node;
 119	xa_erase(&tcf_exts_miss_cookies_xa, n->miss_cookie_base);
 120	kfree_rcu(n, rcu);
 121}
 122
 123static struct tcf_exts_miss_cookie_node *
 124tcf_exts_miss_cookie_lookup(u64 miss_cookie, int *act_index)
 125{
 126	union tcf_exts_miss_cookie mc = { .miss_cookie = miss_cookie, };
 127
 128	*act_index = mc.act_index;
 129	return xa_load(&tcf_exts_miss_cookies_xa, mc.miss_cookie_base);
 130}
 131#else /* IS_ENABLED(CONFIG_NET_TC_SKB_EXT) */
 132static int
 133tcf_exts_miss_cookie_base_alloc(struct tcf_exts *exts, struct tcf_proto *tp,
 134				u32 handle)
 135{
 136	return 0;
 137}
 138
 139static void tcf_exts_miss_cookie_base_destroy(struct tcf_exts *exts)
 140{
 141}
 142#endif /* IS_ENABLED(CONFIG_NET_TC_SKB_EXT) */
 143
 144static u64 tcf_exts_miss_cookie_get(u32 miss_cookie_base, int act_index)
 145{
 146	union tcf_exts_miss_cookie mc = { .act_index = act_index, };
 147
 148	if (!miss_cookie_base)
 149		return 0;
 150
 151	mc.miss_cookie_base = miss_cookie_base;
 152	return mc.miss_cookie;
 153}
 154
 155#ifdef CONFIG_NET_CLS_ACT
 156DEFINE_STATIC_KEY_FALSE(tc_skb_ext_tc);
 157EXPORT_SYMBOL(tc_skb_ext_tc);
 158
 159void tc_skb_ext_tc_enable(void)
 160{
 161	static_branch_inc(&tc_skb_ext_tc);
 162}
 163EXPORT_SYMBOL(tc_skb_ext_tc_enable);
 164
 165void tc_skb_ext_tc_disable(void)
 166{
 167	static_branch_dec(&tc_skb_ext_tc);
 168}
 169EXPORT_SYMBOL(tc_skb_ext_tc_disable);
 170#endif
 171
 172static u32 destroy_obj_hashfn(const struct tcf_proto *tp)
 173{
 174	return jhash_3words(tp->chain->index, tp->prio,
 175			    (__force __u32)tp->protocol, 0);
 176}
 177
 178static void tcf_proto_signal_destroying(struct tcf_chain *chain,
 179					struct tcf_proto *tp)
 180{
 181	struct tcf_block *block = chain->block;
 182
 183	mutex_lock(&block->proto_destroy_lock);
 184	hash_add_rcu(block->proto_destroy_ht, &tp->destroy_ht_node,
 185		     destroy_obj_hashfn(tp));
 186	mutex_unlock(&block->proto_destroy_lock);
 187}
 188
 189static bool tcf_proto_cmp(const struct tcf_proto *tp1,
 190			  const struct tcf_proto *tp2)
 191{
 192	return tp1->chain->index == tp2->chain->index &&
 193	       tp1->prio == tp2->prio &&
 194	       tp1->protocol == tp2->protocol;
 195}
 196
 197static bool tcf_proto_exists_destroying(struct tcf_chain *chain,
 198					struct tcf_proto *tp)
 199{
 200	u32 hash = destroy_obj_hashfn(tp);
 201	struct tcf_proto *iter;
 202	bool found = false;
 203
 204	rcu_read_lock();
 205	hash_for_each_possible_rcu(chain->block->proto_destroy_ht, iter,
 206				   destroy_ht_node, hash) {
 207		if (tcf_proto_cmp(tp, iter)) {
 208			found = true;
 209			break;
 210		}
 211	}
 212	rcu_read_unlock();
 213
 214	return found;
 215}
 216
 217static void
 218tcf_proto_signal_destroyed(struct tcf_chain *chain, struct tcf_proto *tp)
 219{
 220	struct tcf_block *block = chain->block;
 221
 222	mutex_lock(&block->proto_destroy_lock);
 223	if (hash_hashed(&tp->destroy_ht_node))
 224		hash_del_rcu(&tp->destroy_ht_node);
 225	mutex_unlock(&block->proto_destroy_lock);
 226}
 227
 228/* Find classifier type by string name */
 229
 230static const struct tcf_proto_ops *__tcf_proto_lookup_ops(const char *kind)
 231{
 232	const struct tcf_proto_ops *t, *res = NULL;
 233
 234	if (kind) {
 235		read_lock(&cls_mod_lock);
 236		list_for_each_entry(t, &tcf_proto_base, head) {
 237			if (strcmp(kind, t->kind) == 0) {
 238				if (try_module_get(t->owner))
 239					res = t;
 240				break;
 241			}
 242		}
 243		read_unlock(&cls_mod_lock);
 244	}
 245	return res;
 246}
 247
 248static const struct tcf_proto_ops *
 249tcf_proto_lookup_ops(const char *kind, bool rtnl_held,
 250		     struct netlink_ext_ack *extack)
 251{
 252	const struct tcf_proto_ops *ops;
 253
 254	ops = __tcf_proto_lookup_ops(kind);
 255	if (ops)
 256		return ops;
 257#ifdef CONFIG_MODULES
 258	if (rtnl_held)
 259		rtnl_unlock();
 260	request_module(NET_CLS_ALIAS_PREFIX "%s", kind);
 261	if (rtnl_held)
 262		rtnl_lock();
 263	ops = __tcf_proto_lookup_ops(kind);
 264	/* We dropped the RTNL semaphore in order to perform
 265	 * the module load. So, even if we succeeded in loading
 266	 * the module we have to replay the request. We indicate
 267	 * this using -EAGAIN.
 268	 */
 269	if (ops) {
 270		module_put(ops->owner);
 271		return ERR_PTR(-EAGAIN);
 272	}
 273#endif
 274	NL_SET_ERR_MSG(extack, "TC classifier not found");
 275	return ERR_PTR(-ENOENT);
 276}
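/* Note the -EAGAIN contract above: a sketch of the replay pattern a caller
 * is expected to follow (tc_new_tfilter() below does this via its replay:
 * label once tcf_proto_create() propagates the error):
 *
 *	replay:
 *		...parse the request...
 *		tp = tcf_proto_create(name, protocol, prio, chain, ...);
 *		if (PTR_ERR(tp) == -EAGAIN)
 *			goto replay;	-- retry with the module now loaded
 */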
 277
 278/* Register(unregister) new classifier type */
 279
 280int register_tcf_proto_ops(struct tcf_proto_ops *ops)
 281{
 282	struct tcf_proto_ops *t;
 283	int rc = -EEXIST;
 284
 285	write_lock(&cls_mod_lock);
 286	list_for_each_entry(t, &tcf_proto_base, head)
 287		if (!strcmp(ops->kind, t->kind))
 288			goto out;
 289
 290	list_add_tail(&ops->head, &tcf_proto_base);
 291	rc = 0;
 292out:
 293	write_unlock(&cls_mod_lock);
 294	return rc;
 295}
 296EXPORT_SYMBOL(register_tcf_proto_ops);
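/* A minimal sketch of the registration pattern (the "foo" classifier and
 * its callbacks are hypothetical; in-tree users such as cls_basic follow
 * the same shape):
 *
 *	static struct tcf_proto_ops cls_foo_ops __read_mostly = {
 *		.kind		= "foo",
 *		.classify	= foo_classify,
 *		.init		= foo_init,
 *		.destroy	= foo_destroy,
 *		.owner		= THIS_MODULE,
 *	};
 *
 *	static int __init cls_foo_init(void)
 *	{
 *		return register_tcf_proto_ops(&cls_foo_ops);
 *	}
 */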
 297
 298static struct workqueue_struct *tc_filter_wq;
 299
 300void unregister_tcf_proto_ops(struct tcf_proto_ops *ops)
 301{
 302	struct tcf_proto_ops *t;
 303	int rc = -ENOENT;
 304
 305	/* Wait for outstanding call_rcu()s, if any, from a
 306	 * tcf_proto_ops's destroy() handler.
 307	 */
 308	rcu_barrier();
 309	flush_workqueue(tc_filter_wq);
 310
 311	write_lock(&cls_mod_lock);
 312	list_for_each_entry(t, &tcf_proto_base, head) {
 313		if (t == ops) {
 314			list_del(&t->head);
 315			rc = 0;
 316			break;
 317		}
 318	}
 319	write_unlock(&cls_mod_lock);
 320
 321	WARN(rc, "unregister tc filter kind(%s) failed %d\n", ops->kind, rc);
 322}
 323EXPORT_SYMBOL(unregister_tcf_proto_ops);
 324
 325bool tcf_queue_work(struct rcu_work *rwork, work_func_t func)
 326{
 327	INIT_RCU_WORK(rwork, func);
 328	return queue_rcu_work(tc_filter_wq, rwork);
 329}
 330EXPORT_SYMBOL(tcf_queue_work);
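/* Typical use from a classifier's delete path (a sketch; foo_filter and
 * foo_delete_work are hypothetical): the work runs only after an RCU grace
 * period has elapsed, in a context that may sleep.
 *
 *	static void foo_delete_work(struct work_struct *work)
 *	{
 *		struct foo_filter *f = container_of(to_rcu_work(work),
 *						    struct foo_filter, rwork);
 *		kfree(f);
 *	}
 *	...
 *	tcf_queue_work(&f->rwork, foo_delete_work);
 */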
 331
 332/* Select new prio value from the range, managed by kernel. */
 333
 334static inline u32 tcf_auto_prio(struct tcf_proto *tp)
 335{
 336	u32 first = TC_H_MAKE(0xC0000000U, 0U);
 337
 338	if (tp)
 339		first = tp->prio - 1;
 340
 341	return TC_H_MAJ(first);
 342}
 343
 344static bool tcf_proto_check_kind(struct nlattr *kind, char *name)
 345{
 346	if (kind)
 347		return nla_strscpy(name, kind, IFNAMSIZ) < 0;
 348	memset(name, 0, IFNAMSIZ);
 349	return false;
 350}
 351
 352static bool tcf_proto_is_unlocked(const char *kind)
 353{
 354	const struct tcf_proto_ops *ops;
 355	bool ret;
 356
 357	if (strlen(kind) == 0)
 358		return false;
 359
 360	ops = tcf_proto_lookup_ops(kind, false, NULL);
 361	/* On error return false to take rtnl lock. Proto lookup/create
 362	 * functions will perform lookup again and properly handle errors.
 363	 */
 364	if (IS_ERR(ops))
 365		return false;
 366
 367	ret = !!(ops->flags & TCF_PROTO_OPS_DOIT_UNLOCKED);
 368	module_put(ops->owner);
 369	return ret;
 370}
 371
 372static struct tcf_proto *tcf_proto_create(const char *kind, u32 protocol,
 373					  u32 prio, struct tcf_chain *chain,
 374					  bool rtnl_held,
 375					  struct netlink_ext_ack *extack)
 376{
 377	struct tcf_proto *tp;
 378	int err;
 379
 380	tp = kzalloc(sizeof(*tp), GFP_KERNEL);
 381	if (!tp)
 382		return ERR_PTR(-ENOBUFS);
 383
 384	tp->ops = tcf_proto_lookup_ops(kind, rtnl_held, extack);
 385	if (IS_ERR(tp->ops)) {
 386		err = PTR_ERR(tp->ops);
 387		goto errout;
 388	}
 389	tp->classify = tp->ops->classify;
 390	tp->protocol = protocol;
 391	tp->prio = prio;
 392	tp->chain = chain;
 393	tp->usesw = !tp->ops->reoffload;
 394	spin_lock_init(&tp->lock);
 395	refcount_set(&tp->refcnt, 1);
 396
 397	err = tp->ops->init(tp);
 398	if (err) {
 399		module_put(tp->ops->owner);
 400		goto errout;
 401	}
 402	return tp;
 403
 404errout:
 405	kfree(tp);
 406	return ERR_PTR(err);
 407}
 408
 409static void tcf_proto_get(struct tcf_proto *tp)
 410{
 411	refcount_inc(&tp->refcnt);
 412}
 413
 414static void tcf_proto_count_usesw(struct tcf_proto *tp, bool add)
 415{
 416#ifdef CONFIG_NET_CLS_ACT
 417	struct tcf_block *block = tp->chain->block;
 418	bool counted = false;
 419
 420	if (!add) {
 421		if (tp->usesw && tp->counted) {
 422			if (!atomic_dec_return(&block->useswcnt))
 423				static_branch_dec(&tcf_sw_enabled_key);
 424			tp->counted = false;
 425		}
 426		return;
 427	}
 428
 429	spin_lock(&tp->lock);
 430	if (tp->usesw && !tp->counted) {
 431		counted = true;
 432		tp->counted = true;
 433	}
 434	spin_unlock(&tp->lock);
 435
 436	if (counted && atomic_inc_return(&block->useswcnt) == 1)
 437		static_branch_inc(&tcf_sw_enabled_key);
 438#endif
 439}
 440
 441static void tcf_chain_put(struct tcf_chain *chain);
 442
 443static void tcf_proto_destroy(struct tcf_proto *tp, bool rtnl_held,
 444			      bool sig_destroy, struct netlink_ext_ack *extack)
 445{
 446	tp->ops->destroy(tp, rtnl_held, extack);
 447	tcf_proto_count_usesw(tp, false);
 448	if (sig_destroy)
 449		tcf_proto_signal_destroyed(tp->chain, tp);
 450	tcf_chain_put(tp->chain);
 451	module_put(tp->ops->owner);
 452	kfree_rcu(tp, rcu);
 453}
 454
 455static void tcf_proto_put(struct tcf_proto *tp, bool rtnl_held,
 456			  struct netlink_ext_ack *extack)
 457{
 458	if (refcount_dec_and_test(&tp->refcnt))
 459		tcf_proto_destroy(tp, rtnl_held, true, extack);
 460}
 461
 462static bool tcf_proto_check_delete(struct tcf_proto *tp)
 463{
 464	if (tp->ops->delete_empty)
 465		return tp->ops->delete_empty(tp);
 466
 467	tp->deleting = true;
 468	return tp->deleting;
 469}
 470
 471static void tcf_proto_mark_delete(struct tcf_proto *tp)
 472{
 473	spin_lock(&tp->lock);
 474	tp->deleting = true;
 475	spin_unlock(&tp->lock);
 476}
 477
 478static bool tcf_proto_is_deleting(struct tcf_proto *tp)
 479{
 480	bool deleting;
 481
 482	spin_lock(&tp->lock);
 483	deleting = tp->deleting;
 484	spin_unlock(&tp->lock);
 485
 486	return deleting;
 487}
 488
 489#define ASSERT_BLOCK_LOCKED(block)					\
 490	lockdep_assert_held(&(block)->lock)
 491
 492struct tcf_filter_chain_list_item {
 493	struct list_head list;
 494	tcf_chain_head_change_t *chain_head_change;
 495	void *chain_head_change_priv;
 496};
 497
 498static struct tcf_chain *tcf_chain_create(struct tcf_block *block,
 499					  u32 chain_index)
 500{
 501	struct tcf_chain *chain;
 502
 503	ASSERT_BLOCK_LOCKED(block);
 504
 505	chain = kzalloc(sizeof(*chain), GFP_KERNEL);
 506	if (!chain)
 507		return NULL;
 508	list_add_tail_rcu(&chain->list, &block->chain_list);
 509	mutex_init(&chain->filter_chain_lock);
 510	chain->block = block;
 511	chain->index = chain_index;
 512	chain->refcnt = 1;
 513	if (!chain->index)
 514		block->chain0.chain = chain;
 515	return chain;
 516}
 517
 518static void tcf_chain_head_change_item(struct tcf_filter_chain_list_item *item,
 519				       struct tcf_proto *tp_head)
 520{
 521	if (item->chain_head_change)
 522		item->chain_head_change(tp_head, item->chain_head_change_priv);
 523}
 524
 525static void tcf_chain0_head_change(struct tcf_chain *chain,
 526				   struct tcf_proto *tp_head)
 527{
 528	struct tcf_filter_chain_list_item *item;
 529	struct tcf_block *block = chain->block;
 530
 531	if (chain->index)
 532		return;
 533
 534	mutex_lock(&block->lock);
 535	list_for_each_entry(item, &block->chain0.filter_chain_list, list)
 536		tcf_chain_head_change_item(item, tp_head);
 537	mutex_unlock(&block->lock);
 538}
 539
 540/* Returns true if block can be safely freed. */
 541
 542static bool tcf_chain_detach(struct tcf_chain *chain)
 543{
 544	struct tcf_block *block = chain->block;
 545
 546	ASSERT_BLOCK_LOCKED(block);
 547
 548	list_del_rcu(&chain->list);
 549	if (!chain->index)
 550		block->chain0.chain = NULL;
 551
 552	if (list_empty(&block->chain_list) &&
 553	    refcount_read(&block->refcnt) == 0)
 554		return true;
 555
 556	return false;
 557}
 558
 559static void tcf_block_destroy(struct tcf_block *block)
 560{
 561	mutex_destroy(&block->lock);
 562	mutex_destroy(&block->proto_destroy_lock);
 563	xa_destroy(&block->ports);
 564	kfree_rcu(block, rcu);
 565}
 566
 567static void tcf_chain_destroy(struct tcf_chain *chain, bool free_block)
 568{
 569	struct tcf_block *block = chain->block;
 570
 571	mutex_destroy(&chain->filter_chain_lock);
 572	kfree_rcu(chain, rcu);
 573	if (free_block)
 574		tcf_block_destroy(block);
 575}
 576
 577static void tcf_chain_hold(struct tcf_chain *chain)
 578{
 579	ASSERT_BLOCK_LOCKED(chain->block);
 580
 581	++chain->refcnt;
 582}
 583
 584static bool tcf_chain_held_by_acts_only(struct tcf_chain *chain)
 585{
 586	ASSERT_BLOCK_LOCKED(chain->block);
 587
 588	/* In case all the references are action references, this
 589	 * chain should not be shown to the user.
 590	 */
 591	return chain->refcnt == chain->action_refcnt;
 592}
 593
 594static struct tcf_chain *tcf_chain_lookup(struct tcf_block *block,
 595					  u32 chain_index)
 596{
 597	struct tcf_chain *chain;
 598
 599	ASSERT_BLOCK_LOCKED(block);
 600
 601	list_for_each_entry(chain, &block->chain_list, list) {
 602		if (chain->index == chain_index)
 603			return chain;
 604	}
 605	return NULL;
 606}
 607
 608#if IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
 609static struct tcf_chain *tcf_chain_lookup_rcu(const struct tcf_block *block,
 610					      u32 chain_index)
 611{
 612	struct tcf_chain *chain;
 613
 614	list_for_each_entry_rcu(chain, &block->chain_list, list) {
 615		if (chain->index == chain_index)
 616			return chain;
 617	}
 618	return NULL;
 619}
 620#endif
 621
 622static int tc_chain_notify(struct tcf_chain *chain, struct sk_buff *oskb,
 623			   u32 seq, u16 flags, int event, bool unicast,
 624			   struct netlink_ext_ack *extack);
 625
 626static struct tcf_chain *__tcf_chain_get(struct tcf_block *block,
 627					 u32 chain_index, bool create,
 628					 bool by_act)
 629{
 630	struct tcf_chain *chain = NULL;
 631	bool is_first_reference;
 632
 633	mutex_lock(&block->lock);
 634	chain = tcf_chain_lookup(block, chain_index);
 635	if (chain) {
 636		tcf_chain_hold(chain);
 637	} else {
 638		if (!create)
 639			goto errout;
 640		chain = tcf_chain_create(block, chain_index);
 641		if (!chain)
 642			goto errout;
 643	}
 644
 645	if (by_act)
 646		++chain->action_refcnt;
 647	is_first_reference = chain->refcnt - chain->action_refcnt == 1;
 648	mutex_unlock(&block->lock);
 649
 650	/* Send notification only in case we got the first
 651	 * non-action reference. Until then, the chain acts only as
 652	 * a placeholder for actions pointing to it and the user ought
 653	 * not to know about them.
 654	 */
 655	if (is_first_reference && !by_act)
 656		tc_chain_notify(chain, NULL, 0, NLM_F_CREATE | NLM_F_EXCL,
 657				RTM_NEWCHAIN, false, NULL);
 658
 659	return chain;
 660
 661errout:
 662	mutex_unlock(&block->lock);
 663	return chain;
 664}
 665
 666static struct tcf_chain *tcf_chain_get(struct tcf_block *block, u32 chain_index,
 667				       bool create)
 668{
 669	return __tcf_chain_get(block, chain_index, create, false);
 670}
 671
 672struct tcf_chain *tcf_chain_get_by_act(struct tcf_block *block, u32 chain_index)
 673{
 674	return __tcf_chain_get(block, chain_index, true, true);
 675}
 676EXPORT_SYMBOL(tcf_chain_get_by_act);
 677
 678static void tc_chain_tmplt_del(const struct tcf_proto_ops *tmplt_ops,
 679			       void *tmplt_priv);
 680static int tc_chain_notify_delete(const struct tcf_proto_ops *tmplt_ops,
 681				  void *tmplt_priv, u32 chain_index,
 682				  struct tcf_block *block, struct sk_buff *oskb,
 683				  u32 seq, u16 flags);
 684
 685static void __tcf_chain_put(struct tcf_chain *chain, bool by_act,
 686			    bool explicitly_created)
 687{
 688	struct tcf_block *block = chain->block;
 689	const struct tcf_proto_ops *tmplt_ops;
 690	unsigned int refcnt, non_act_refcnt;
 691	bool free_block = false;
 692	void *tmplt_priv;
 693
 694	mutex_lock(&block->lock);
 695	if (explicitly_created) {
 696		if (!chain->explicitly_created) {
 697			mutex_unlock(&block->lock);
 698			return;
 699		}
 700		chain->explicitly_created = false;
 701	}
 702
 703	if (by_act)
 704		chain->action_refcnt--;
 705
 706	/* tc_chain_notify_delete can't be called while holding block lock.
 707	 * However, when block is unlocked chain can be changed concurrently, so
 708	 * save these to temporary variables.
 709	 */
 710	refcnt = --chain->refcnt;
 711	non_act_refcnt = refcnt - chain->action_refcnt;
 712	tmplt_ops = chain->tmplt_ops;
 713	tmplt_priv = chain->tmplt_priv;
 714
 715	if (non_act_refcnt == chain->explicitly_created && !by_act) {
 716		if (non_act_refcnt == 0)
 717			tc_chain_notify_delete(tmplt_ops, tmplt_priv,
 718					       chain->index, block, NULL, 0, 0);
 719		/* Last reference to chain, no need to lock. */
 720		chain->flushing = false;
 721	}
 722
 723	if (refcnt == 0)
 724		free_block = tcf_chain_detach(chain);
 725	mutex_unlock(&block->lock);
 726
 727	if (refcnt == 0) {
 728		tc_chain_tmplt_del(tmplt_ops, tmplt_priv);
 729		tcf_chain_destroy(chain, free_block);
 730	}
 731}
 732
 733static void tcf_chain_put(struct tcf_chain *chain)
 734{
 735	__tcf_chain_put(chain, false, false);
 736}
 737
 738void tcf_chain_put_by_act(struct tcf_chain *chain)
 739{
 740	__tcf_chain_put(chain, true, false);
 741}
 742EXPORT_SYMBOL(tcf_chain_put_by_act);
 743
 744static void tcf_chain_put_explicitly_created(struct tcf_chain *chain)
 745{
 746	__tcf_chain_put(chain, false, true);
 747}
 748
 749static void tcf_chain_flush(struct tcf_chain *chain, bool rtnl_held)
 750{
 751	struct tcf_proto *tp, *tp_next;
 752
 753	mutex_lock(&chain->filter_chain_lock);
 754	tp = tcf_chain_dereference(chain->filter_chain, chain);
 755	while (tp) {
 756		tp_next = rcu_dereference_protected(tp->next, 1);
 757		tcf_proto_signal_destroying(chain, tp);
 758		tp = tp_next;
 759	}
 760	tp = tcf_chain_dereference(chain->filter_chain, chain);
 761	RCU_INIT_POINTER(chain->filter_chain, NULL);
 762	tcf_chain0_head_change(chain, NULL);
 763	chain->flushing = true;
 764	mutex_unlock(&chain->filter_chain_lock);
 765
 766	while (tp) {
 767		tp_next = rcu_dereference_protected(tp->next, 1);
 768		tcf_proto_put(tp, rtnl_held, NULL);
 769		tp = tp_next;
 770	}
 771}
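/* The flush above is deliberately two-phase: under filter_chain_lock every
 * tp is first published to the block's proto_destroy hashtable via
 * tcf_proto_signal_destroying(), so a concurrent insert of the same
 * (chain, prio, protocol) triple sees the pending destruction and backs
 * off with -EAGAIN (see tcf_chain_tp_insert_unique()); only then, outside
 * the lock, are the references dropped, since the classifier's destroy
 * callback may sleep.
 */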
 772
 773static int tcf_block_setup(struct tcf_block *block,
 774			   struct flow_block_offload *bo);
 775
 776static void tcf_block_offload_init(struct flow_block_offload *bo,
 777				   struct net_device *dev, struct Qdisc *sch,
 778				   enum flow_block_command command,
 779				   enum flow_block_binder_type binder_type,
 780				   struct flow_block *flow_block,
 781				   bool shared, struct netlink_ext_ack *extack)
 782{
 783	bo->net = dev_net(dev);
 784	bo->command = command;
 785	bo->binder_type = binder_type;
 786	bo->block = flow_block;
 787	bo->block_shared = shared;
 788	bo->extack = extack;
 789	bo->sch = sch;
 790	bo->cb_list_head = &flow_block->cb_list;
 791	INIT_LIST_HEAD(&bo->cb_list);
 792}
 793
 794static void tcf_block_unbind(struct tcf_block *block,
 795			     struct flow_block_offload *bo);
 796
 797static void tc_block_indr_cleanup(struct flow_block_cb *block_cb)
 798{
 799	struct tcf_block *block = block_cb->indr.data;
 800	struct net_device *dev = block_cb->indr.dev;
 801	struct Qdisc *sch = block_cb->indr.sch;
 802	struct netlink_ext_ack extack = {};
 803	struct flow_block_offload bo = {};
 804
 805	tcf_block_offload_init(&bo, dev, sch, FLOW_BLOCK_UNBIND,
 806			       block_cb->indr.binder_type,
 807			       &block->flow_block, tcf_block_shared(block),
 808			       &extack);
 809	rtnl_lock();
 810	down_write(&block->cb_lock);
 811	list_del(&block_cb->driver_list);
 812	list_move(&block_cb->list, &bo.cb_list);
 813	tcf_block_unbind(block, &bo);
 814	up_write(&block->cb_lock);
 815	rtnl_unlock();
 816}
 817
 818static bool tcf_block_offload_in_use(struct tcf_block *block)
 819{
 820	return atomic_read(&block->offloadcnt);
 821}
 822
 823static int tcf_block_offload_cmd(struct tcf_block *block,
 824				 struct net_device *dev, struct Qdisc *sch,
 825				 struct tcf_block_ext_info *ei,
 826				 enum flow_block_command command,
 827				 struct netlink_ext_ack *extack)
 828{
 829	struct flow_block_offload bo = {};
 830
 831	tcf_block_offload_init(&bo, dev, sch, command, ei->binder_type,
 832			       &block->flow_block, tcf_block_shared(block),
 833			       extack);
 834
 835	if (dev->netdev_ops->ndo_setup_tc) {
 836		int err;
 837
 838		err = dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_BLOCK, &bo);
 839		if (err < 0) {
 840			if (err != -EOPNOTSUPP)
 841				NL_SET_ERR_MSG(extack, "Driver ndo_setup_tc failed");
 842			return err;
 843		}
 844
 845		return tcf_block_setup(block, &bo);
 846	}
 847
 848	flow_indr_dev_setup_offload(dev, sch, TC_SETUP_BLOCK, block, &bo,
 849				    tc_block_indr_cleanup);
 850	tcf_block_setup(block, &bo);
 851
 852	return -EOPNOTSUPP;
 853}
 854
 855static int tcf_block_offload_bind(struct tcf_block *block, struct Qdisc *q,
 856				  struct tcf_block_ext_info *ei,
 857				  struct netlink_ext_ack *extack)
 858{
 859	struct net_device *dev = q->dev_queue->dev;
 860	int err;
 861
 862	down_write(&block->cb_lock);
 863
 864	/* If tc offload feature is disabled and the block we try to bind
 865	 * to already has some offloaded filters, refuse to bind.
 866	 */
 867	if (dev->netdev_ops->ndo_setup_tc &&
 868	    !tc_can_offload(dev) &&
 869	    tcf_block_offload_in_use(block)) {
 870		NL_SET_ERR_MSG(extack, "Bind to offloaded block failed as dev has offload disabled");
 871		err = -EOPNOTSUPP;
 872		goto err_unlock;
 873	}
 874
 875	err = tcf_block_offload_cmd(block, dev, q, ei, FLOW_BLOCK_BIND, extack);
 876	if (err == -EOPNOTSUPP)
 877		goto no_offload_dev_inc;
 878	if (err)
 879		goto err_unlock;
 880
 881	up_write(&block->cb_lock);
 882	return 0;
 883
 884no_offload_dev_inc:
 885	if (tcf_block_offload_in_use(block))
 886		goto err_unlock;
 887
 888	err = 0;
 889	block->nooffloaddevcnt++;
 890err_unlock:
 891	up_write(&block->cb_lock);
 892	return err;
 893}
 894
 895static void tcf_block_offload_unbind(struct tcf_block *block, struct Qdisc *q,
 896				     struct tcf_block_ext_info *ei)
 897{
 898	struct net_device *dev = q->dev_queue->dev;
 899	int err;
 900
 901	down_write(&block->cb_lock);
 902	err = tcf_block_offload_cmd(block, dev, q, ei, FLOW_BLOCK_UNBIND, NULL);
 903	if (err == -EOPNOTSUPP)
 904		goto no_offload_dev_dec;
 905	up_write(&block->cb_lock);
 906	return;
 907
 908no_offload_dev_dec:
 909	WARN_ON(block->nooffloaddevcnt-- == 0);
 910	up_write(&block->cb_lock);
 911}
 912
 913static int
 914tcf_chain0_head_change_cb_add(struct tcf_block *block,
 915			      struct tcf_block_ext_info *ei,
 916			      struct netlink_ext_ack *extack)
 917{
 918	struct tcf_filter_chain_list_item *item;
 919	struct tcf_chain *chain0;
 920
 921	item = kmalloc(sizeof(*item), GFP_KERNEL);
 922	if (!item) {
 923		NL_SET_ERR_MSG(extack, "Memory allocation for head change callback item failed");
 924		return -ENOMEM;
 925	}
 926	item->chain_head_change = ei->chain_head_change;
 927	item->chain_head_change_priv = ei->chain_head_change_priv;
 928
 929	mutex_lock(&block->lock);
 930	chain0 = block->chain0.chain;
 931	if (chain0)
 932		tcf_chain_hold(chain0);
 933	else
 934		list_add(&item->list, &block->chain0.filter_chain_list);
 935	mutex_unlock(&block->lock);
 936
 937	if (chain0) {
 938		struct tcf_proto *tp_head;
 939
 940		mutex_lock(&chain0->filter_chain_lock);
 941
 942		tp_head = tcf_chain_dereference(chain0->filter_chain, chain0);
 943		if (tp_head)
 944			tcf_chain_head_change_item(item, tp_head);
 945
 946		mutex_lock(&block->lock);
 947		list_add(&item->list, &block->chain0.filter_chain_list);
 948		mutex_unlock(&block->lock);
 949
 950		mutex_unlock(&chain0->filter_chain_lock);
 951		tcf_chain_put(chain0);
 952	}
 953
 954	return 0;
 955}
 956
 957static void
 958tcf_chain0_head_change_cb_del(struct tcf_block *block,
 959			      struct tcf_block_ext_info *ei)
 960{
 961	struct tcf_filter_chain_list_item *item;
 962
 963	mutex_lock(&block->lock);
 964	list_for_each_entry(item, &block->chain0.filter_chain_list, list) {
 965		if ((!ei->chain_head_change && !ei->chain_head_change_priv) ||
 966		    (item->chain_head_change == ei->chain_head_change &&
 967		     item->chain_head_change_priv == ei->chain_head_change_priv)) {
 968			if (block->chain0.chain)
 969				tcf_chain_head_change_item(item, NULL);
 970			list_del(&item->list);
 971			mutex_unlock(&block->lock);
 972
 973			kfree(item);
 974			return;
 975		}
 976	}
 977	mutex_unlock(&block->lock);
 978	WARN_ON(1);
 979}
 980
 981struct tcf_net {
 982	spinlock_t idr_lock; /* Protects idr */
 983	struct idr idr;
 984};
 985
 986static unsigned int tcf_net_id;
 987
 988static int tcf_block_insert(struct tcf_block *block, struct net *net,
 989			    struct netlink_ext_ack *extack)
 990{
 991	struct tcf_net *tn = net_generic(net, tcf_net_id);
 992	int err;
 993
 994	idr_preload(GFP_KERNEL);
 995	spin_lock(&tn->idr_lock);
 996	err = idr_alloc_u32(&tn->idr, block, &block->index, block->index,
 997			    GFP_NOWAIT);
 998	spin_unlock(&tn->idr_lock);
 999	idr_preload_end();
1000
1001	return err;
1002}
1003
1004static void tcf_block_remove(struct tcf_block *block, struct net *net)
1005{
1006	struct tcf_net *tn = net_generic(net, tcf_net_id);
1007
1008	spin_lock(&tn->idr_lock);
1009	idr_remove(&tn->idr, block->index);
1010	spin_unlock(&tn->idr_lock);
1011}
1012
1013static struct tcf_block *tcf_block_create(struct net *net, struct Qdisc *q,
1014					  u32 block_index,
1015					  struct netlink_ext_ack *extack)
1016{
1017	struct tcf_block *block;
1018
1019	block = kzalloc(sizeof(*block), GFP_KERNEL);
1020	if (!block) {
1021		NL_SET_ERR_MSG(extack, "Memory allocation for block failed");
1022		return ERR_PTR(-ENOMEM);
1023	}
1024	mutex_init(&block->lock);
1025	mutex_init(&block->proto_destroy_lock);
1026	init_rwsem(&block->cb_lock);
1027	flow_block_init(&block->flow_block);
1028	INIT_LIST_HEAD(&block->chain_list);
1029	INIT_LIST_HEAD(&block->owner_list);
1030	INIT_LIST_HEAD(&block->chain0.filter_chain_list);
1031
1032	refcount_set(&block->refcnt, 1);
1033	block->net = net;
1034	block->index = block_index;
1035	xa_init(&block->ports);
1036
1037	/* Don't store q pointer for blocks which are shared */
1038	if (!tcf_block_shared(block))
1039		block->q = q;
1040	return block;
1041}
1042
1043struct tcf_block *tcf_block_lookup(struct net *net, u32 block_index)
1044{
1045	struct tcf_net *tn = net_generic(net, tcf_net_id);
1046
1047	return idr_find(&tn->idr, block_index);
1048}
1049EXPORT_SYMBOL(tcf_block_lookup);
1050
1051static struct tcf_block *tcf_block_refcnt_get(struct net *net, u32 block_index)
1052{
1053	struct tcf_block *block;
1054
1055	rcu_read_lock();
1056	block = tcf_block_lookup(net, block_index);
1057	if (block && !refcount_inc_not_zero(&block->refcnt))
1058		block = NULL;
1059	rcu_read_unlock();
1060
1061	return block;
1062}
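/* This is the usual RCU lookup pattern: the idr is searched under
 * rcu_read_lock() and refcount_inc_not_zero() refuses to resurrect a block
 * whose last reference is concurrently being dropped, so a non-NULL return
 * is always a valid, counted reference.
 */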
1063
1064static struct tcf_chain *
1065__tcf_get_next_chain(struct tcf_block *block, struct tcf_chain *chain)
1066{
1067	mutex_lock(&block->lock);
1068	if (chain)
1069		chain = list_is_last(&chain->list, &block->chain_list) ?
1070			NULL : list_next_entry(chain, list);
1071	else
1072		chain = list_first_entry_or_null(&block->chain_list,
1073						 struct tcf_chain, list);
1074
1075	/* skip all action-only chains */
1076	while (chain && tcf_chain_held_by_acts_only(chain))
1077		chain = list_is_last(&chain->list, &block->chain_list) ?
1078			NULL : list_next_entry(chain, list);
1079
1080	if (chain)
1081		tcf_chain_hold(chain);
1082	mutex_unlock(&block->lock);
1083
1084	return chain;
1085}
1086
1087/* Function to be used by all clients that want to iterate over all chains on
1088 * block. It properly obtains block->lock and takes reference to chain before
1089 * returning it. Users of this function must be tolerant of concurrent chain
1090 * insertion/deletion or ensure that no concurrent chain modification is
1091 * possible. Note that netlink dump callbacks cannot guarantee a consistent
1092 * dump because the rtnl lock is released each time an skb is filled with
1093 * data and sent to user-space.
1094 */
1095
1096struct tcf_chain *
1097tcf_get_next_chain(struct tcf_block *block, struct tcf_chain *chain)
1098{
1099	struct tcf_chain *chain_next = __tcf_get_next_chain(block, chain);
1100
1101	if (chain)
1102		tcf_chain_put(chain);
1103
1104	return chain_next;
1105}
1106EXPORT_SYMBOL(tcf_get_next_chain);
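/* Canonical iteration (a usage sketch; breaking out of the loop early
 * requires an explicit tcf_chain_put() on the current chain):
 *
 *	struct tcf_chain *chain;
 *
 *	for (chain = tcf_get_next_chain(block, NULL);
 *	     chain;
 *	     chain = tcf_get_next_chain(block, chain)) {
 *		...visit chain...
 *	}
 */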
1107
1108static struct tcf_proto *
1109__tcf_get_next_proto(struct tcf_chain *chain, struct tcf_proto *tp)
1110{
1111	u32 prio = 0;
1112
1113	ASSERT_RTNL();
1114	mutex_lock(&chain->filter_chain_lock);
1115
1116	if (!tp) {
1117		tp = tcf_chain_dereference(chain->filter_chain, chain);
1118	} else if (tcf_proto_is_deleting(tp)) {
1119		/* 'deleting' flag is set and chain->filter_chain_lock was
1120		 * unlocked, which means next pointer could be invalid. Restart
1121		 * search.
1122		 */
1123		prio = tp->prio + 1;
1124		tp = tcf_chain_dereference(chain->filter_chain, chain);
1125
1126		for (; tp; tp = tcf_chain_dereference(tp->next, chain))
1127			if (!tp->deleting && tp->prio >= prio)
1128				break;
1129	} else {
1130		tp = tcf_chain_dereference(tp->next, chain);
1131	}
1132
1133	if (tp)
1134		tcf_proto_get(tp);
1135
1136	mutex_unlock(&chain->filter_chain_lock);
1137
1138	return tp;
1139}
1140
1141/* Function to be used by all clients that want to iterate over all tp's on
1142 * a chain. Users of this function must be tolerant of concurrent tp
1143 * insertion/deletion or ensure that no concurrent chain modification is
1144 * possible. Note that netlink dump callbacks cannot guarantee a consistent
1145 * dump because the rtnl lock is released each time an skb is filled with
1146 * data and sent to user-space.
1147 */
1148
1149struct tcf_proto *
1150tcf_get_next_proto(struct tcf_chain *chain, struct tcf_proto *tp)
1151{
1152	struct tcf_proto *tp_next = __tcf_get_next_proto(chain, tp);
1153
1154	if (tp)
1155		tcf_proto_put(tp, true, NULL);
1156
1157	return tp_next;
1158}
1159EXPORT_SYMBOL(tcf_get_next_proto);
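/* Combined with tcf_get_next_chain() this yields the nested walk used by
 * the flush and dump paths (early exit needs a tcf_proto_put() as well):
 *
 *	for (chain = tcf_get_next_chain(block, NULL); chain;
 *	     chain = tcf_get_next_chain(block, chain))
 *		for (tp = tcf_get_next_proto(chain, NULL); tp;
 *		     tp = tcf_get_next_proto(chain, tp))
 *			...visit tp...
 */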
1160
1161static void tcf_block_flush_all_chains(struct tcf_block *block, bool rtnl_held)
1162{
1163	struct tcf_chain *chain;
1164
1165	/* Last reference to block. At this point chains cannot be added or
1166	 * removed concurrently.
1167	 */
1168	for (chain = tcf_get_next_chain(block, NULL);
1169	     chain;
1170	     chain = tcf_get_next_chain(block, chain)) {
1171		tcf_chain_put_explicitly_created(chain);
1172		tcf_chain_flush(chain, rtnl_held);
1173	}
1174}
1175
1176/* Lookup Qdisc and increment its reference counter.
1177 * Set parent, if necessary.
1178 */
1179
1180static int __tcf_qdisc_find(struct net *net, struct Qdisc **q,
1181			    u32 *parent, int ifindex, bool rtnl_held,
1182			    struct netlink_ext_ack *extack)
1183{
1184	const struct Qdisc_class_ops *cops;
1185	struct net_device *dev;
1186	int err = 0;
1187
1188	if (ifindex == TCM_IFINDEX_MAGIC_BLOCK)
1189		return 0;
1190
1191	rcu_read_lock();
1192
1193	/* Find link */
1194	dev = dev_get_by_index_rcu(net, ifindex);
1195	if (!dev) {
1196		rcu_read_unlock();
1197		return -ENODEV;
1198	}
1199
1200	/* Find qdisc */
1201	if (!*parent) {
1202		*q = rcu_dereference(dev->qdisc);
1203		*parent = (*q)->handle;
1204	} else {
1205		*q = qdisc_lookup_rcu(dev, TC_H_MAJ(*parent));
1206		if (!*q) {
1207			NL_SET_ERR_MSG(extack, "Parent Qdisc doesn't exist");
1208			err = -EINVAL;
1209			goto errout_rcu;
1210		}
1211	}
1212
1213	*q = qdisc_refcount_inc_nz(*q);
1214	if (!*q) {
1215		NL_SET_ERR_MSG(extack, "Parent Qdisc doesn't exist");
1216		err = -EINVAL;
1217		goto errout_rcu;
1218	}
1219
1220	/* Is it classful? */
1221	cops = (*q)->ops->cl_ops;
1222	if (!cops) {
1223		NL_SET_ERR_MSG(extack, "Qdisc not classful");
1224		err = -EINVAL;
1225		goto errout_qdisc;
1226	}
1227
1228	if (!cops->tcf_block) {
1229		NL_SET_ERR_MSG(extack, "Class doesn't support blocks");
1230		err = -EOPNOTSUPP;
1231		goto errout_qdisc;
1232	}
1233
1234errout_rcu:
1235	/* At this point we know that qdisc is not noop_qdisc,
1236	 * which means that qdisc holds a reference to net_device
1237	 * and we hold a reference to qdisc, so it is safe to release
1238	 * rcu read lock.
1239	 */
1240	rcu_read_unlock();
1241	return err;
1242
1243errout_qdisc:
1244	rcu_read_unlock();
1245
1246	if (rtnl_held)
1247		qdisc_put(*q);
1248	else
1249		qdisc_put_unlocked(*q);
1250	*q = NULL;
1251
1252	return err;
1253}
1254
1255static int __tcf_qdisc_cl_find(struct Qdisc *q, u32 parent, unsigned long *cl,
1256			       int ifindex, struct netlink_ext_ack *extack)
1257{
1258	if (ifindex == TCM_IFINDEX_MAGIC_BLOCK)
1259		return 0;
1260
1261	/* Do we search for a filter attached to a class? */
1262	if (TC_H_MIN(parent)) {
1263		const struct Qdisc_class_ops *cops = q->ops->cl_ops;
1264
1265		*cl = cops->find(q, parent);
1266		if (*cl == 0) {
1267			NL_SET_ERR_MSG(extack, "Specified class doesn't exist");
1268			return -ENOENT;
1269		}
1270	}
1271
1272	return 0;
1273}
1274
1275static struct tcf_block *__tcf_block_find(struct net *net, struct Qdisc *q,
1276					  unsigned long cl, int ifindex,
1277					  u32 block_index,
1278					  struct netlink_ext_ack *extack)
1279{
1280	struct tcf_block *block;
1281
1282	if (ifindex == TCM_IFINDEX_MAGIC_BLOCK) {
1283		block = tcf_block_refcnt_get(net, block_index);
1284		if (!block) {
1285			NL_SET_ERR_MSG(extack, "Block of given index was not found");
1286			return ERR_PTR(-EINVAL);
1287		}
1288	} else {
1289		const struct Qdisc_class_ops *cops = q->ops->cl_ops;
1290
1291		block = cops->tcf_block(q, cl, extack);
1292		if (!block)
1293			return ERR_PTR(-EINVAL);
1294
1295		if (tcf_block_shared(block)) {
1296			NL_SET_ERR_MSG(extack, "This filter block is shared. Please use the block index to manipulate the filters");
1297			return ERR_PTR(-EOPNOTSUPP);
1298		}
1299
1300		/* Always take a reference to the block in order to support
1301		 * execution of the cls API rules update path without the rtnl
1302		 * lock. Caller must release the block when it is finished using
1303		 * it. The 'if' branch of this conditional obtains a reference
1304		 * to the block by calling tcf_block_refcnt_get().
1305		 */
1306		refcount_inc(&block->refcnt);
1307	}
1308
1309	return block;
1310}
1311
1312static void __tcf_block_put(struct tcf_block *block, struct Qdisc *q,
1313			    struct tcf_block_ext_info *ei, bool rtnl_held)
1314{
1315	if (refcount_dec_and_mutex_lock(&block->refcnt, &block->lock)) {
1316		/* Flushing/putting all chains will cause the block to be
1317		 * deallocated when last chain is freed. However, if chain_list
1318		 * is empty, block has to be manually deallocated. After block
1319		 * reference counter reached 0, it is no longer possible to
1320		 * increment it or add new chains to block.
1321		 */
1322		bool free_block = list_empty(&block->chain_list);
1323
1324		mutex_unlock(&block->lock);
1325		if (tcf_block_shared(block))
1326			tcf_block_remove(block, block->net);
1327
1328		if (q)
1329			tcf_block_offload_unbind(block, q, ei);
1330
1331		if (free_block)
1332			tcf_block_destroy(block);
1333		else
1334			tcf_block_flush_all_chains(block, rtnl_held);
1335	} else if (q) {
1336		tcf_block_offload_unbind(block, q, ei);
1337	}
1338}
1339
1340static void tcf_block_refcnt_put(struct tcf_block *block, bool rtnl_held)
1341{
1342	__tcf_block_put(block, NULL, NULL, rtnl_held);
1343}
1344
1345/* Find tcf block.
1346 * Set q, parent, cl when appropriate.
1347 */
1348
1349static struct tcf_block *tcf_block_find(struct net *net, struct Qdisc **q,
1350					u32 *parent, unsigned long *cl,
1351					int ifindex, u32 block_index,
1352					struct netlink_ext_ack *extack)
1353{
1354	struct tcf_block *block;
1355	int err = 0;
1356
1357	ASSERT_RTNL();
1358
1359	err = __tcf_qdisc_find(net, q, parent, ifindex, true, extack);
1360	if (err)
1361		goto errout;
1362
1363	err = __tcf_qdisc_cl_find(*q, *parent, cl, ifindex, extack);
1364	if (err)
1365		goto errout_qdisc;
1366
1367	block = __tcf_block_find(net, *q, *cl, ifindex, block_index, extack);
1368	if (IS_ERR(block)) {
1369		err = PTR_ERR(block);
1370		goto errout_qdisc;
1371	}
1372
1373	return block;
1374
1375errout_qdisc:
1376	if (*q)
1377		qdisc_put(*q);
1378errout:
1379	*q = NULL;
1380	return ERR_PTR(err);
1381}
1382
1383static void tcf_block_release(struct Qdisc *q, struct tcf_block *block,
1384			      bool rtnl_held)
1385{
1386	if (!IS_ERR_OR_NULL(block))
1387		tcf_block_refcnt_put(block, rtnl_held);
1388
1389	if (q) {
1390		if (rtnl_held)
1391			qdisc_put(q);
1392		else
1393			qdisc_put_unlocked(q);
1394	}
1395}
1396
1397struct tcf_block_owner_item {
1398	struct list_head list;
1399	struct Qdisc *q;
1400	enum flow_block_binder_type binder_type;
1401};
1402
1403static void
1404tcf_block_owner_netif_keep_dst(struct tcf_block *block,
1405			       struct Qdisc *q,
1406			       enum flow_block_binder_type binder_type)
1407{
1408	if (block->keep_dst &&
1409	    binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS &&
1410	    binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS)
1411		netif_keep_dst(qdisc_dev(q));
1412}
1413
1414void tcf_block_netif_keep_dst(struct tcf_block *block)
1415{
1416	struct tcf_block_owner_item *item;
1417
1418	block->keep_dst = true;
1419	list_for_each_entry(item, &block->owner_list, list)
1420		tcf_block_owner_netif_keep_dst(block, item->q,
1421					       item->binder_type);
1422}
1423EXPORT_SYMBOL(tcf_block_netif_keep_dst);
1424
1425static int tcf_block_owner_add(struct tcf_block *block,
1426			       struct Qdisc *q,
1427			       enum flow_block_binder_type binder_type)
1428{
1429	struct tcf_block_owner_item *item;
1430
1431	item = kmalloc(sizeof(*item), GFP_KERNEL);
1432	if (!item)
1433		return -ENOMEM;
1434	item->q = q;
1435	item->binder_type = binder_type;
1436	list_add(&item->list, &block->owner_list);
1437	return 0;
1438}
1439
1440static void tcf_block_owner_del(struct tcf_block *block,
1441				struct Qdisc *q,
1442				enum flow_block_binder_type binder_type)
1443{
1444	struct tcf_block_owner_item *item;
1445
1446	list_for_each_entry(item, &block->owner_list, list) {
1447		if (item->q == q && item->binder_type == binder_type) {
1448			list_del(&item->list);
1449			kfree(item);
1450			return;
1451		}
1452	}
1453	WARN_ON(1);
1454}
1455
1456static bool tcf_block_tracks_dev(struct tcf_block *block,
1457				 struct tcf_block_ext_info *ei)
1458{
1459	return tcf_block_shared(block) &&
1460	       (ei->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS ||
1461		ei->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS);
1462}
1463
1464int tcf_block_get_ext(struct tcf_block **p_block, struct Qdisc *q,
1465		      struct tcf_block_ext_info *ei,
1466		      struct netlink_ext_ack *extack)
1467{
1468	struct net_device *dev = qdisc_dev(q);
1469	struct net *net = qdisc_net(q);
1470	struct tcf_block *block = NULL;
1471	int err;
1472
1473	if (ei->block_index)
1474		/* block_index not 0 means the shared block is requested */
1475		block = tcf_block_refcnt_get(net, ei->block_index);
1476
1477	if (!block) {
1478		block = tcf_block_create(net, q, ei->block_index, extack);
1479		if (IS_ERR(block))
1480			return PTR_ERR(block);
1481		if (tcf_block_shared(block)) {
1482			err = tcf_block_insert(block, net, extack);
1483			if (err)
1484				goto err_block_insert;
1485		}
1486	}
1487
1488	err = tcf_block_owner_add(block, q, ei->binder_type);
1489	if (err)
1490		goto err_block_owner_add;
1491
1492	tcf_block_owner_netif_keep_dst(block, q, ei->binder_type);
1493
1494	err = tcf_chain0_head_change_cb_add(block, ei, extack);
1495	if (err)
1496		goto err_chain0_head_change_cb_add;
1497
1498	err = tcf_block_offload_bind(block, q, ei, extack);
1499	if (err)
1500		goto err_block_offload_bind;
1501
1502	if (tcf_block_tracks_dev(block, ei)) {
1503		err = xa_insert(&block->ports, dev->ifindex, dev, GFP_KERNEL);
1504		if (err) {
1505			NL_SET_ERR_MSG(extack, "block dev insert failed");
1506			goto err_dev_insert;
1507		}
1508	}
1509
1510	*p_block = block;
1511	return 0;
1512
1513err_dev_insert:
1514	tcf_block_offload_unbind(block, q, ei);
1515err_block_offload_bind:
1516	tcf_chain0_head_change_cb_del(block, ei);
1517err_chain0_head_change_cb_add:
1518	tcf_block_owner_del(block, q, ei->binder_type);
1519err_block_owner_add:
1520err_block_insert:
1521	tcf_block_refcnt_put(block, true);
1522	return err;
1523}
1524EXPORT_SYMBOL(tcf_block_get_ext);
1525
1526static void tcf_chain_head_change_dflt(struct tcf_proto *tp_head, void *priv)
1527{
1528	struct tcf_proto __rcu **p_filter_chain = priv;
1529
1530	rcu_assign_pointer(*p_filter_chain, tp_head);
1531}
1532
1533int tcf_block_get(struct tcf_block **p_block,
1534		  struct tcf_proto __rcu **p_filter_chain, struct Qdisc *q,
1535		  struct netlink_ext_ack *extack)
1536{
1537	struct tcf_block_ext_info ei = {
1538		.chain_head_change = tcf_chain_head_change_dflt,
1539		.chain_head_change_priv = p_filter_chain,
1540	};
1541
1542	WARN_ON(!p_filter_chain);
1543	return tcf_block_get_ext(p_block, q, &ei, extack);
1544}
1545EXPORT_SYMBOL(tcf_block_get);
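/* Typical use from a classful qdisc (a sketch; "q" stands for the qdisc's
 * private data, assumed to contain a block pointer and a filter_list head):
 *
 *	err = tcf_block_get(&q->block, &q->filter_list, sch, extack);
 *	if (err)
 *		return err;
 *
 * with a matching tcf_block_put(q->block) in the qdisc's destroy callback.
 */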
1546
1547/* XXX: Standalone actions are not allowed to jump to any chain, and bound
1548 * actions should all be removed after flushing.
1549 */
1550void tcf_block_put_ext(struct tcf_block *block, struct Qdisc *q,
1551		       struct tcf_block_ext_info *ei)
1552{
1553	struct net_device *dev = qdisc_dev(q);
1554
1555	if (!block)
1556		return;
1557	if (tcf_block_tracks_dev(block, ei))
1558		xa_erase(&block->ports, dev->ifindex);
1559	tcf_chain0_head_change_cb_del(block, ei);
1560	tcf_block_owner_del(block, q, ei->binder_type);
1561
1562	__tcf_block_put(block, q, ei, true);
1563}
1564EXPORT_SYMBOL(tcf_block_put_ext);
1565
1566void tcf_block_put(struct tcf_block *block)
1567{
1568	struct tcf_block_ext_info ei = {0, };
1569
1570	if (!block)
1571		return;
1572	tcf_block_put_ext(block, block->q, &ei);
1573}
1574
1575EXPORT_SYMBOL(tcf_block_put);
1576
1577static int
1578tcf_block_playback_offloads(struct tcf_block *block, flow_setup_cb_t *cb,
1579			    void *cb_priv, bool add, bool offload_in_use,
1580			    struct netlink_ext_ack *extack)
1581{
1582	struct tcf_chain *chain, *chain_prev;
1583	struct tcf_proto *tp, *tp_prev;
1584	int err;
1585
1586	lockdep_assert_held(&block->cb_lock);
1587
1588	for (chain = __tcf_get_next_chain(block, NULL);
1589	     chain;
1590	     chain_prev = chain,
1591		     chain = __tcf_get_next_chain(block, chain),
1592		     tcf_chain_put(chain_prev)) {
1593		if (chain->tmplt_ops && add)
1594			chain->tmplt_ops->tmplt_reoffload(chain, true, cb,
1595							  cb_priv);
1596		for (tp = __tcf_get_next_proto(chain, NULL); tp;
1597		     tp_prev = tp,
1598			     tp = __tcf_get_next_proto(chain, tp),
1599			     tcf_proto_put(tp_prev, true, NULL)) {
1600			if (tp->ops->reoffload) {
1601				err = tp->ops->reoffload(tp, add, cb, cb_priv,
1602							 extack);
1603				if (err && add)
1604					goto err_playback_remove;
1605			} else if (add && offload_in_use) {
1606				err = -EOPNOTSUPP;
1607				NL_SET_ERR_MSG(extack, "Filter HW offload failed - classifier without re-offloading support");
1608				goto err_playback_remove;
1609			}
1610		}
1611		if (chain->tmplt_ops && !add)
1612			chain->tmplt_ops->tmplt_reoffload(chain, false, cb,
1613							  cb_priv);
1614	}
1615
1616	return 0;
1617
1618err_playback_remove:
1619	tcf_proto_put(tp, true, NULL);
1620	tcf_chain_put(chain);
1621	tcf_block_playback_offloads(block, cb, cb_priv, false, offload_in_use,
1622				    extack);
1623	return err;
1624}
1625
1626static int tcf_block_bind(struct tcf_block *block,
1627			  struct flow_block_offload *bo)
1628{
1629	struct flow_block_cb *block_cb, *next;
1630	int err, i = 0;
1631
1632	lockdep_assert_held(&block->cb_lock);
1633
1634	list_for_each_entry(block_cb, &bo->cb_list, list) {
1635		err = tcf_block_playback_offloads(block, block_cb->cb,
1636						  block_cb->cb_priv, true,
1637						  tcf_block_offload_in_use(block),
1638						  bo->extack);
1639		if (err)
1640			goto err_unroll;
1641		if (!bo->unlocked_driver_cb)
1642			block->lockeddevcnt++;
1643
1644		i++;
1645	}
1646	list_splice(&bo->cb_list, &block->flow_block.cb_list);
1647
1648	return 0;
1649
1650err_unroll:
1651	list_for_each_entry_safe(block_cb, next, &bo->cb_list, list) {
1652		list_del(&block_cb->driver_list);
1653		if (i-- > 0) {
1654			list_del(&block_cb->list);
1655			tcf_block_playback_offloads(block, block_cb->cb,
1656						    block_cb->cb_priv, false,
1657						    tcf_block_offload_in_use(block),
1658						    NULL);
1659			if (!bo->unlocked_driver_cb)
1660				block->lockeddevcnt--;
1661		}
1662		flow_block_cb_free(block_cb);
1663	}
1664
1665	return err;
1666}
1667
1668static void tcf_block_unbind(struct tcf_block *block,
1669			     struct flow_block_offload *bo)
1670{
1671	struct flow_block_cb *block_cb, *next;
1672
1673	lockdep_assert_held(&block->cb_lock);
1674
1675	list_for_each_entry_safe(block_cb, next, &bo->cb_list, list) {
1676		tcf_block_playback_offloads(block, block_cb->cb,
1677					    block_cb->cb_priv, false,
1678					    tcf_block_offload_in_use(block),
1679					    NULL);
1680		list_del(&block_cb->list);
1681		flow_block_cb_free(block_cb);
1682		if (!bo->unlocked_driver_cb)
1683			block->lockeddevcnt--;
1684	}
1685}
1686
1687static int tcf_block_setup(struct tcf_block *block,
1688			   struct flow_block_offload *bo)
1689{
1690	int err;
1691
1692	switch (bo->command) {
1693	case FLOW_BLOCK_BIND:
1694		err = tcf_block_bind(block, bo);
1695		break;
1696	case FLOW_BLOCK_UNBIND:
1697		err = 0;
1698		tcf_block_unbind(block, bo);
1699		break;
1700	default:
1701		WARN_ON_ONCE(1);
1702		err = -EOPNOTSUPP;
1703	}
1704
1705	return err;
1706}
1707
1708/* Main classifier routine: scans classifier chain attached
1709 * to this qdisc, (optionally) tests for protocol and asks
1710 * specific classifiers.
1711 */
1712static inline int __tcf_classify(struct sk_buff *skb,
1713				 const struct tcf_proto *tp,
1714				 const struct tcf_proto *orig_tp,
1715				 struct tcf_result *res,
1716				 bool compat_mode,
1717				 struct tcf_exts_miss_cookie_node *n,
1718				 int act_index,
1719				 u32 *last_executed_chain)
1720{
1721#ifdef CONFIG_NET_CLS_ACT
1722	const int max_reclassify_loop = 16;
1723	const struct tcf_proto *first_tp;
1724	int limit = 0;
1725
1726reclassify:
1727#endif
1728	for (; tp; tp = rcu_dereference_bh(tp->next)) {
1729		__be16 protocol = skb_protocol(skb, false);
1730		int err = 0;
1731
1732		if (n) {
1733			struct tcf_exts *exts;
1734
1735			if (n->tp_prio != tp->prio)
1736				continue;
1737
1738			/* We re-lookup the tp and chain based on index instead
1739			 * of having hard refs and locks to them, so do a sanity
1740			 * check whether any of tp, chain or exts was replaced by the
1741			 * time we got here with a cookie from hardware.
1742			 */
1743			if (unlikely(n->tp != tp || n->tp->chain != n->chain ||
1744				     !tp->ops->get_exts)) {
1745				tcf_set_drop_reason(skb,
1746						    SKB_DROP_REASON_TC_COOKIE_ERROR);
1747				return TC_ACT_SHOT;
1748			}
1749
1750			exts = tp->ops->get_exts(tp, n->handle);
1751			if (unlikely(!exts || n->exts != exts)) {
1752				tcf_set_drop_reason(skb,
1753						    SKB_DROP_REASON_TC_COOKIE_ERROR);
1754				return TC_ACT_SHOT;
1755			}
1756
1757			n = NULL;
1758			err = tcf_exts_exec_ex(skb, exts, act_index, res);
1759		} else {
1760			if (tp->protocol != protocol &&
1761			    tp->protocol != htons(ETH_P_ALL))
1762				continue;
1763
1764			err = tc_classify(skb, tp, res);
1765		}
1766#ifdef CONFIG_NET_CLS_ACT
1767		if (unlikely(err == TC_ACT_RECLASSIFY && !compat_mode)) {
1768			first_tp = orig_tp;
1769			*last_executed_chain = first_tp->chain->index;
1770			goto reset;
1771		} else if (unlikely(TC_ACT_EXT_CMP(err, TC_ACT_GOTO_CHAIN))) {
1772			first_tp = res->goto_tp;
1773			*last_executed_chain = err & TC_ACT_EXT_VAL_MASK;
1774			goto reset;
1775		}
1776#endif
1777		if (err >= 0)
1778			return err;
1779	}
1780
1781	if (unlikely(n)) {
1782		tcf_set_drop_reason(skb,
1783				    SKB_DROP_REASON_TC_COOKIE_ERROR);
1784		return TC_ACT_SHOT;
1785	}
1786
1787	return TC_ACT_UNSPEC; /* signal: continue lookup */
1788#ifdef CONFIG_NET_CLS_ACT
1789reset:
1790	if (unlikely(limit++ >= max_reclassify_loop)) {
1791		net_notice_ratelimited("%u: reclassify loop, rule prio %u, protocol %02x\n",
1792				       tp->chain->block->index,
1793				       tp->prio & 0xffff,
1794				       ntohs(tp->protocol));
1795		tcf_set_drop_reason(skb,
1796				    SKB_DROP_REASON_TC_RECLASSIFY_LOOP);
1797		return TC_ACT_SHOT;
1798	}
1799
1800	tp = first_tp;
1801	goto reclassify;
1802#endif
1803}
1804
1805int tcf_classify(struct sk_buff *skb,
1806		 const struct tcf_block *block,
1807		 const struct tcf_proto *tp,
1808		 struct tcf_result *res, bool compat_mode)
1809{
1810#if !IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
1811	u32 last_executed_chain = 0;
1812
1813	return __tcf_classify(skb, tp, tp, res, compat_mode, NULL, 0,
1814			      &last_executed_chain);
1815#else
1816	u32 last_executed_chain = tp ? tp->chain->index : 0;
1817	struct tcf_exts_miss_cookie_node *n = NULL;
1818	const struct tcf_proto *orig_tp = tp;
1819	struct tc_skb_ext *ext;
1820	int act_index = 0;
1821	int ret;
1822
1823	if (block) {
1824		ext = skb_ext_find(skb, TC_SKB_EXT);
1825
1826		if (ext && (ext->chain || ext->act_miss)) {
1827			struct tcf_chain *fchain;
1828			u32 chain;
1829
1830			if (ext->act_miss) {
1831				n = tcf_exts_miss_cookie_lookup(ext->act_miss_cookie,
1832								&act_index);
1833				if (!n) {
1834					tcf_set_drop_reason(skb,
1835							    SKB_DROP_REASON_TC_COOKIE_ERROR);
1836					return TC_ACT_SHOT;
1837				}
1838
1839				chain = n->chain_index;
1840			} else {
1841				chain = ext->chain;
1842			}
1843
1844			fchain = tcf_chain_lookup_rcu(block, chain);
1845			if (!fchain) {
1846				tcf_set_drop_reason(skb,
1847						    SKB_DROP_REASON_TC_CHAIN_NOTFOUND);
1848
1849				return TC_ACT_SHOT;
1850			}
1851
1852			/* Consume, so cloned/redirect skbs won't inherit ext */
1853			skb_ext_del(skb, TC_SKB_EXT);
1854
1855			tp = rcu_dereference_bh(fchain->filter_chain);
1856			last_executed_chain = fchain->index;
1857		}
1858	}
1859
1860	ret = __tcf_classify(skb, tp, orig_tp, res, compat_mode, n, act_index,
1861			     &last_executed_chain);
1862
1863	if (tc_skb_ext_tc_enabled()) {
1864		/* If we missed on some chain */
1865		if (ret == TC_ACT_UNSPEC && last_executed_chain) {
1866			struct tc_skb_cb *cb = tc_skb_cb(skb);
1867
1868			ext = tc_skb_ext_alloc(skb);
1869			if (WARN_ON_ONCE(!ext)) {
1870				tcf_set_drop_reason(skb, SKB_DROP_REASON_NOMEM);
1871				return TC_ACT_SHOT;
1872			}
1873			ext->chain = last_executed_chain;
1874			ext->mru = cb->mru;
1875			ext->post_ct = cb->post_ct;
1876			ext->post_ct_snat = cb->post_ct_snat;
1877			ext->post_ct_dnat = cb->post_ct_dnat;
1878			ext->zone = cb->zone;
1879		}
1880	}
1881
1882	return ret;
1883#endif
1884}
1885EXPORT_SYMBOL(tcf_classify);
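/* Caller-facing contract: a return >= 0 is a verdict such as TC_ACT_OK or
 * TC_ACT_SHOT, while TC_ACT_UNSPEC means no filter matched and the caller
 * should continue its own lookup. Reclassification is bounded by
 * max_reclassify_loop (16) passes so a misconfigured ruleset cannot loop
 * forever; exceeding the bound drops the packet.
 */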
1886
1887struct tcf_chain_info {
1888	struct tcf_proto __rcu **pprev;
1889	struct tcf_proto __rcu *next;
1890};
1891
1892static struct tcf_proto *tcf_chain_tp_prev(struct tcf_chain *chain,
1893					   struct tcf_chain_info *chain_info)
1894{
1895	return tcf_chain_dereference(*chain_info->pprev, chain);
1896}
1897
1898static int tcf_chain_tp_insert(struct tcf_chain *chain,
1899			       struct tcf_chain_info *chain_info,
1900			       struct tcf_proto *tp)
1901{
1902	if (chain->flushing)
1903		return -EAGAIN;
1904
1905	RCU_INIT_POINTER(tp->next, tcf_chain_tp_prev(chain, chain_info));
1906	if (*chain_info->pprev == chain->filter_chain)
1907		tcf_chain0_head_change(chain, tp);
1908	tcf_proto_get(tp);
1909	rcu_assign_pointer(*chain_info->pprev, tp);
1910
1911	return 0;
1912}
1913
1914static void tcf_chain_tp_remove(struct tcf_chain *chain,
1915				struct tcf_chain_info *chain_info,
1916				struct tcf_proto *tp)
1917{
1918	struct tcf_proto *next = tcf_chain_dereference(chain_info->next, chain);
1919
1920	tcf_proto_mark_delete(tp);
1921	if (tp == chain->filter_chain)
1922		tcf_chain0_head_change(chain, next);
1923	RCU_INIT_POINTER(*chain_info->pprev, next);
1924}
1925
1926static struct tcf_proto *tcf_chain_tp_find(struct tcf_chain *chain,
1927					   struct tcf_chain_info *chain_info,
1928					   u32 protocol, u32 prio,
1929					   bool prio_allocate,
1930					   struct netlink_ext_ack *extack);
1931
1932/* Try to insert new proto.
1933 * If proto with specified priority already exists, free new proto
1934 * and return existing one.
1935 */
1936
1937static struct tcf_proto *tcf_chain_tp_insert_unique(struct tcf_chain *chain,
1938						    struct tcf_proto *tp_new,
1939						    u32 protocol, u32 prio,
1940						    bool rtnl_held)
1941{
1942	struct tcf_chain_info chain_info;
1943	struct tcf_proto *tp;
1944	int err = 0;
1945
1946	mutex_lock(&chain->filter_chain_lock);
1947
1948	if (tcf_proto_exists_destroying(chain, tp_new)) {
1949		mutex_unlock(&chain->filter_chain_lock);
1950		tcf_proto_destroy(tp_new, rtnl_held, false, NULL);
1951		return ERR_PTR(-EAGAIN);
1952	}
1953
1954	tp = tcf_chain_tp_find(chain, &chain_info, protocol, prio, false, NULL);
1955	if (!tp)
1956		err = tcf_chain_tp_insert(chain, &chain_info, tp_new);
1957	mutex_unlock(&chain->filter_chain_lock);
1958
1959	if (tp) {
1960		tcf_proto_destroy(tp_new, rtnl_held, false, NULL);
1961		tp_new = tp;
1962	} else if (err) {
1963		tcf_proto_destroy(tp_new, rtnl_held, false, NULL);
1964		tp_new = ERR_PTR(err);
1965	}
1966
1967	return tp_new;
1968}
1969
1970static void tcf_chain_tp_delete_empty(struct tcf_chain *chain,
1971				      struct tcf_proto *tp, bool rtnl_held,
1972				      struct netlink_ext_ack *extack)
1973{
1974	struct tcf_chain_info chain_info;
1975	struct tcf_proto *tp_iter;
1976	struct tcf_proto **pprev;
1977	struct tcf_proto *next;
1978
1979	mutex_lock(&chain->filter_chain_lock);
1980
1981	/* Atomically find and remove tp from chain. */
1982	for (pprev = &chain->filter_chain;
1983	     (tp_iter = tcf_chain_dereference(*pprev, chain));
1984	     pprev = &tp_iter->next) {
1985		if (tp_iter == tp) {
1986			chain_info.pprev = pprev;
1987			chain_info.next = tp_iter->next;
1988			WARN_ON(tp_iter->deleting);
1989			break;
1990		}
1991	}
1992	/* Verify that tp still exists and no new filters were inserted
1993	 * concurrently.
1994	 * Mark tp for deletion if it is empty.
1995	 */
1996	if (!tp_iter || !tcf_proto_check_delete(tp)) {
1997		mutex_unlock(&chain->filter_chain_lock);
1998		return;
1999	}
2000
2001	tcf_proto_signal_destroying(chain, tp);
2002	next = tcf_chain_dereference(chain_info.next, chain);
2003	if (tp == chain->filter_chain)
2004		tcf_chain0_head_change(chain, next);
2005	RCU_INIT_POINTER(*chain_info.pprev, next);
2006	mutex_unlock(&chain->filter_chain_lock);
2007
2008	tcf_proto_put(tp, rtnl_held, extack);
2009}
2010
2011static struct tcf_proto *tcf_chain_tp_find(struct tcf_chain *chain,
2012					   struct tcf_chain_info *chain_info,
2013					   u32 protocol, u32 prio,
2014					   bool prio_allocate,
2015					   struct netlink_ext_ack *extack)
2016{
2017	struct tcf_proto **pprev;
2018	struct tcf_proto *tp;
2019
2020	/* Check the chain for existence of a tcf_proto with this priority */
2021	for (pprev = &chain->filter_chain;
2022	     (tp = tcf_chain_dereference(*pprev, chain));
2023	     pprev = &tp->next) {
2024		if (tp->prio >= prio) {
2025			if (tp->prio == prio) {
2026				if (prio_allocate) {
2027					NL_SET_ERR_MSG(extack, "Lowest ID from auto-alloc range already in use");
2028					return ERR_PTR(-ENOSPC);
2029				}
2030				if (tp->protocol != protocol && protocol) {
2031					NL_SET_ERR_MSG(extack, "Protocol mismatch for filter with specified priority");
2032					return ERR_PTR(-EINVAL);
2033				}
2034			} else {
2035				tp = NULL;
2036			}
2037			break;
2038		}
2039	}
2040	chain_info->pprev = pprev;
2041	if (tp) {
2042		chain_info->next = tp->next;
2043		tcf_proto_get(tp);
2044	} else {
2045		chain_info->next = NULL;
2046	}
2047	return tp;
2048}
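/* The filter_chain list is kept sorted by ascending prio, so the scan above
 * stops at the first tp with tp->prio >= prio. For example, with prios
 * {1, 10, 20} on the chain, a lookup for prio 10 returns the middle entry,
 * while a lookup for prio 15 stops at 20 and returns NULL with
 * chain_info->pprev pointing at the insertion slot before it.
 */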
2049
2050static int tcf_fill_node(struct net *net, struct sk_buff *skb,
2051			 struct tcf_proto *tp, struct tcf_block *block,
2052			 struct Qdisc *q, u32 parent, void *fh,
2053			 u32 portid, u32 seq, u16 flags, int event,
2054			 bool terse_dump, bool rtnl_held,
2055			 struct netlink_ext_ack *extack)
2056{
2057	struct tcmsg *tcm;
2058	struct nlmsghdr  *nlh;
2059	unsigned char *b = skb_tail_pointer(skb);
2060
2061	nlh = nlmsg_put(skb, portid, seq, event, sizeof(*tcm), flags);
2062	if (!nlh)
2063		goto out_nlmsg_trim;
2064	tcm = nlmsg_data(nlh);
2065	tcm->tcm_family = AF_UNSPEC;
2066	tcm->tcm__pad1 = 0;
2067	tcm->tcm__pad2 = 0;
2068	if (q) {
2069		tcm->tcm_ifindex = qdisc_dev(q)->ifindex;
2070		tcm->tcm_parent = parent;
2071	} else {
2072		tcm->tcm_ifindex = TCM_IFINDEX_MAGIC_BLOCK;
2073		tcm->tcm_block_index = block->index;
2074	}
2075	tcm->tcm_info = TC_H_MAKE(tp->prio, tp->protocol);
2076	if (nla_put_string(skb, TCA_KIND, tp->ops->kind))
2077		goto nla_put_failure;
2078	if (nla_put_u32(skb, TCA_CHAIN, tp->chain->index))
2079		goto nla_put_failure;
2080	if (!fh) {
2081		tcm->tcm_handle = 0;
2082	} else if (terse_dump) {
2083		if (tp->ops->terse_dump) {
2084			if (tp->ops->terse_dump(net, tp, fh, skb, tcm,
2085						rtnl_held) < 0)
2086				goto nla_put_failure;
2087		} else {
2088			goto cls_op_not_supp;
2089		}
2090	} else {
2091		if (tp->ops->dump &&
2092		    tp->ops->dump(net, tp, fh, skb, tcm, rtnl_held) < 0)
2093			goto nla_put_failure;
2094	}
2095
2096	if (extack && extack->_msg &&
2097	    nla_put_string(skb, TCA_EXT_WARN_MSG, extack->_msg))
2098		goto nla_put_failure;
2099
2100	nlh->nlmsg_len = skb_tail_pointer(skb) - b;
2101
2102	return skb->len;
2103
2104out_nlmsg_trim:
2105nla_put_failure:
2106cls_op_not_supp:
2107	nlmsg_trim(skb, b);
2108	return -1;
2109}
2110
2111static int tfilter_notify(struct net *net, struct sk_buff *oskb,
2112			  struct nlmsghdr *n, struct tcf_proto *tp,
2113			  struct tcf_block *block, struct Qdisc *q,
2114			  u32 parent, void *fh, int event, bool unicast,
2115			  bool rtnl_held, struct netlink_ext_ack *extack)
2116{
2117	struct sk_buff *skb;
2118	u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
2119	int err = 0;
2120
2121	if (!unicast && !rtnl_notify_needed(net, n->nlmsg_flags, RTNLGRP_TC))
2122		return 0;
2123
2124	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
2125	if (!skb)
2126		return -ENOBUFS;
2127
2128	if (tcf_fill_node(net, skb, tp, block, q, parent, fh, portid,
2129			  n->nlmsg_seq, n->nlmsg_flags, event,
2130			  false, rtnl_held, extack) <= 0) {
2131		kfree_skb(skb);
2132		return -EINVAL;
2133	}
2134
2135	if (unicast)
2136		err = rtnl_unicast(skb, net, portid);
2137	else
2138		err = rtnetlink_send(skb, net, portid, RTNLGRP_TC,
2139				     n->nlmsg_flags & NLM_F_ECHO);
2140	return err;
2141}
2142
2143static int tfilter_del_notify(struct net *net, struct sk_buff *oskb,
2144			      struct nlmsghdr *n, struct tcf_proto *tp,
2145			      struct tcf_block *block, struct Qdisc *q,
2146			      u32 parent, void *fh, bool *last, bool rtnl_held,
2147			      struct netlink_ext_ack *extack)
2148{
2149	struct sk_buff *skb;
2150	u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
2151	int err;
2152
2153	if (!rtnl_notify_needed(net, n->nlmsg_flags, RTNLGRP_TC))
2154		return tp->ops->delete(tp, fh, last, rtnl_held, extack);
2155
2156	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
2157	if (!skb)
2158		return -ENOBUFS;
2159
2160	if (tcf_fill_node(net, skb, tp, block, q, parent, fh, portid,
2161			  n->nlmsg_seq, n->nlmsg_flags, RTM_DELTFILTER,
2162			  false, rtnl_held, extack) <= 0) {
2163		NL_SET_ERR_MSG(extack, "Failed to build del event notification");
2164		kfree_skb(skb);
2165		return -EINVAL;
2166	}
2167
2168	err = tp->ops->delete(tp, fh, last, rtnl_held, extack);
2169	if (err) {
2170		kfree_skb(skb);
2171		return err;
2172	}
2173
2174	err = rtnetlink_send(skb, net, portid, RTNLGRP_TC,
2175			     n->nlmsg_flags & NLM_F_ECHO);
2176	if (err < 0)
2177		NL_SET_ERR_MSG(extack, "Failed to send filter delete notification");
2178
2179	return err;
2180}
2181
2182static void tfilter_notify_chain(struct net *net, struct sk_buff *oskb,
2183				 struct tcf_block *block, struct Qdisc *q,
2184				 u32 parent, struct nlmsghdr *n,
2185				 struct tcf_chain *chain, int event,
2186				 struct netlink_ext_ack *extack)
2187{
2188	struct tcf_proto *tp;
2189
2190	for (tp = tcf_get_next_proto(chain, NULL);
2191	     tp; tp = tcf_get_next_proto(chain, tp))
2192		tfilter_notify(net, oskb, n, tp, block, q, parent, NULL,
2193			       event, false, true, extack);
2194}
2195
2196static void tfilter_put(struct tcf_proto *tp, void *fh)
2197{
2198	if (tp->ops->put && fh)
2199		tp->ops->put(tp, fh);
2200}
2201
2202static bool is_qdisc_ingress(__u32 classid)
2203{
2204	return (TC_H_MIN(classid) == TC_H_MIN(TC_H_MIN_INGRESS));
2205}
2206
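/* Add or modify a filter (RTM_NEWTFILTER). Looks up the block and chain,
 * creating the chain and classifier instance if needed, then calls the
 * classifier's change op. Takes the rtnl lock only when the qdisc or
 * classifier requires it, and replays the request on -EAGAIN.
 */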
2207static int tc_new_tfilter(struct sk_buff *skb, struct nlmsghdr *n,
2208			  struct netlink_ext_ack *extack)
2209{
2210	struct net *net = sock_net(skb->sk);
2211	struct nlattr *tca[TCA_MAX + 1];
2212	char name[IFNAMSIZ];
2213	struct tcmsg *t;
2214	u32 protocol;
2215	u32 prio;
2216	bool prio_allocate;
2217	u32 parent;
2218	u32 chain_index;
2219	struct Qdisc *q;
2220	struct tcf_chain_info chain_info;
2221	struct tcf_chain *chain;
2222	struct tcf_block *block;
2223	struct tcf_proto *tp;
2224	unsigned long cl;
2225	void *fh;
2226	int err;
2227	int tp_created;
2228	bool rtnl_held = false;
2229	u32 flags;
2230
2231replay:
2232	tp_created = 0;
2233
2234	err = nlmsg_parse_deprecated(n, sizeof(*t), tca, TCA_MAX,
2235				     rtm_tca_policy, extack);
2236	if (err < 0)
2237		return err;
2238
2239	t = nlmsg_data(n);
2240	protocol = TC_H_MIN(t->tcm_info);
2241	prio = TC_H_MAJ(t->tcm_info);
2242	prio_allocate = false;
2243	parent = t->tcm_parent;
2244	tp = NULL;
2245	cl = 0;
2246	block = NULL;
2247	q = NULL;
2248	chain = NULL;
2249	flags = 0;
2250
2251	if (prio == 0) {
2252		/* If no priority is provided by the user,
2253		 * we allocate one.
2254		 */
2255		if (n->nlmsg_flags & NLM_F_CREATE) {
2256			prio = TC_H_MAKE(0x80000000U, 0U);
2257			prio_allocate = true;
2258		} else {
2259			NL_SET_ERR_MSG(extack, "Invalid filter command with priority of zero");
2260			return -ENOENT;
2261		}
2262	}
2263
2264	/* Find head of filter chain. */
2265
2266	err = __tcf_qdisc_find(net, &q, &parent, t->tcm_ifindex, false, extack);
2267	if (err)
2268		return err;
2269
2270	if (tcf_proto_check_kind(tca[TCA_KIND], name)) {
2271		NL_SET_ERR_MSG(extack, "Specified TC filter name too long");
2272		err = -EINVAL;
2273		goto errout;
2274	}
2275
2276	/* Take the rtnl mutex if rtnl_held was set to true on a previous
2277	 * iteration, the block is shared (no qdisc found), the qdisc is not
2278	 * unlocked, or the classifier type is unspecified or not unlocked.
2279	 */
2280	if (rtnl_held ||
2281	    (q && !(q->ops->cl_ops->flags & QDISC_CLASS_OPS_DOIT_UNLOCKED)) ||
2282	    !tcf_proto_is_unlocked(name)) {
2283		rtnl_held = true;
2284		rtnl_lock();
2285	}
2286
2287	err = __tcf_qdisc_cl_find(q, parent, &cl, t->tcm_ifindex, extack);
2288	if (err)
2289		goto errout;
2290
2291	block = __tcf_block_find(net, q, cl, t->tcm_ifindex, t->tcm_block_index,
2292				 extack);
2293	if (IS_ERR(block)) {
2294		err = PTR_ERR(block);
2295		goto errout;
2296	}
2297	block->classid = parent;
2298
2299	chain_index = nla_get_u32_default(tca[TCA_CHAIN], 0);
2300	if (chain_index > TC_ACT_EXT_VAL_MASK) {
2301		NL_SET_ERR_MSG(extack, "Specified chain index exceeds upper limit");
2302		err = -EINVAL;
2303		goto errout;
2304	}
2305	chain = tcf_chain_get(block, chain_index, true);
2306	if (!chain) {
2307		NL_SET_ERR_MSG(extack, "Cannot create specified filter chain");
2308		err = -ENOMEM;
2309		goto errout;
2310	}
2311
2312	mutex_lock(&chain->filter_chain_lock);
2313	tp = tcf_chain_tp_find(chain, &chain_info, protocol,
2314			       prio, prio_allocate, extack);
2315	if (IS_ERR(tp)) {
2316		err = PTR_ERR(tp);
2317		goto errout_locked;
2318	}
2319
2320	if (tp == NULL) {
2321		struct tcf_proto *tp_new = NULL;
2322
2323		if (chain->flushing) {
2324			err = -EAGAIN;
2325			goto errout_locked;
2326		}
2327
2328		/* Proto-tcf does not exist, create new one */
2329
2330		if (tca[TCA_KIND] == NULL || !protocol) {
2331			NL_SET_ERR_MSG(extack, "Filter kind and protocol must be specified");
2332			err = -EINVAL;
2333			goto errout_locked;
2334		}
2335
2336		if (!(n->nlmsg_flags & NLM_F_CREATE)) {
2337			NL_SET_ERR_MSG(extack, "Need both RTM_NEWTFILTER and NLM_F_CREATE to create a new filter");
2338			err = -ENOENT;
2339			goto errout_locked;
2340		}
2341
2342		if (prio_allocate)
2343			prio = tcf_auto_prio(tcf_chain_tp_prev(chain,
2344							       &chain_info));
2345
2346		mutex_unlock(&chain->filter_chain_lock);
2347		tp_new = tcf_proto_create(name, protocol, prio, chain,
2348					  rtnl_held, extack);
2349		if (IS_ERR(tp_new)) {
2350			err = PTR_ERR(tp_new);
2351			goto errout_tp;
2352		}
2353
2354		tp_created = 1;
2355		tp = tcf_chain_tp_insert_unique(chain, tp_new, protocol, prio,
2356						rtnl_held);
2357		if (IS_ERR(tp)) {
2358			err = PTR_ERR(tp);
2359			goto errout_tp;
2360		}
2361	} else {
2362		mutex_unlock(&chain->filter_chain_lock);
2363	}
2364
2365	if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], tp->ops->kind)) {
2366		NL_SET_ERR_MSG(extack, "Specified filter kind does not match existing one");
2367		err = -EINVAL;
2368		goto errout;
2369	}
2370
2371	fh = tp->ops->get(tp, t->tcm_handle);
2372
2373	if (!fh) {
2374		if (!(n->nlmsg_flags & NLM_F_CREATE)) {
2375			NL_SET_ERR_MSG(extack, "Need both RTM_NEWTFILTER and NLM_F_CREATE to create a new filter");
2376			err = -ENOENT;
2377			goto errout;
2378		}
2379	} else if (n->nlmsg_flags & NLM_F_EXCL) {
2380		tfilter_put(tp, fh);
2381		NL_SET_ERR_MSG(extack, "Filter already exists");
2382		err = -EEXIST;
2383		goto errout;
2384	}
2385
2386	if (chain->tmplt_ops && chain->tmplt_ops != tp->ops) {
2387		tfilter_put(tp, fh);
2388		NL_SET_ERR_MSG(extack, "Chain template is set to a different filter kind");
2389		err = -EINVAL;
2390		goto errout;
2391	}
2392
2393	if (!(n->nlmsg_flags & NLM_F_CREATE))
2394		flags |= TCA_ACT_FLAGS_REPLACE;
2395	if (!rtnl_held)
2396		flags |= TCA_ACT_FLAGS_NO_RTNL;
2397	if (is_qdisc_ingress(parent))
2398		flags |= TCA_ACT_FLAGS_AT_INGRESS;
2399	err = tp->ops->change(net, skb, tp, cl, t->tcm_handle, tca, &fh,
2400			      flags, extack);
2401	if (err == 0) {
2402		tfilter_notify(net, skb, n, tp, block, q, parent, fh,
2403			       RTM_NEWTFILTER, false, rtnl_held, extack);
2404		tfilter_put(tp, fh);
2405		tcf_proto_count_usesw(tp, true);
2406		/* q pointer is NULL for shared blocks */
2407		if (q)
2408			q->flags &= ~TCQ_F_CAN_BYPASS;
2409	}
2410
2411errout:
2412	if (err && tp_created)
2413		tcf_chain_tp_delete_empty(chain, tp, rtnl_held, NULL);
2414errout_tp:
2415	if (chain) {
2416		if (tp && !IS_ERR(tp))
2417			tcf_proto_put(tp, rtnl_held, NULL);
2418		if (!tp_created)
2419			tcf_chain_put(chain);
2420	}
2421	tcf_block_release(q, block, rtnl_held);
2422
2423	if (rtnl_held)
2424		rtnl_unlock();
2425
2426	if (err == -EAGAIN) {
2427		/* Take rtnl lock in case EAGAIN is caused by concurrent flush
2428		 * of target chain.
2429		 */
2430		rtnl_held = true;
2431		/* Replay the request. */
2432		goto replay;
2433	}
2434	return err;
2435
2436errout_locked:
2437	mutex_unlock(&chain->filter_chain_lock);
2438	goto errout;
2439}
2440
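/* Delete filters (RTM_DELTFILTER). A zero priority flushes the whole
 * chain, a zero handle removes an entire classifier instance, otherwise
 * a single filter is deleted.
 */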
2441static int tc_del_tfilter(struct sk_buff *skb, struct nlmsghdr *n,
2442			  struct netlink_ext_ack *extack)
2443{
2444	struct net *net = sock_net(skb->sk);
2445	struct nlattr *tca[TCA_MAX + 1];
2446	char name[IFNAMSIZ];
2447	struct tcmsg *t;
2448	u32 protocol;
2449	u32 prio;
2450	u32 parent;
2451	u32 chain_index;
2452	struct Qdisc *q = NULL;
2453	struct tcf_chain_info chain_info;
2454	struct tcf_chain *chain = NULL;
2455	struct tcf_block *block = NULL;
2456	struct tcf_proto *tp = NULL;
2457	unsigned long cl = 0;
2458	void *fh = NULL;
2459	int err;
2460	bool rtnl_held = false;
2461
2462	err = nlmsg_parse_deprecated(n, sizeof(*t), tca, TCA_MAX,
2463				     rtm_tca_policy, extack);
2464	if (err < 0)
2465		return err;
2466
2467	t = nlmsg_data(n);
2468	protocol = TC_H_MIN(t->tcm_info);
2469	prio = TC_H_MAJ(t->tcm_info);
2470	parent = t->tcm_parent;
2471
2472	if (prio == 0 && (protocol || t->tcm_handle || tca[TCA_KIND])) {
2473		NL_SET_ERR_MSG(extack, "Cannot flush filters with protocol, handle or kind set");
2474		return -ENOENT;
2475	}
2476
2477	/* Find head of filter chain. */
2478
2479	err = __tcf_qdisc_find(net, &q, &parent, t->tcm_ifindex, false, extack);
2480	if (err)
2481		return err;
2482
2483	if (tcf_proto_check_kind(tca[TCA_KIND], name)) {
2484		NL_SET_ERR_MSG(extack, "Specified TC filter name too long");
2485		err = -EINVAL;
2486		goto errout;
2487	}
2488	/* Take the rtnl mutex if flushing the whole chain, the block is
2489	 * shared (no qdisc found), the qdisc is not unlocked, or the
2490	 * classifier type is unspecified or not unlocked.
2491	 */
2492	if (!prio ||
2493	    (q && !(q->ops->cl_ops->flags & QDISC_CLASS_OPS_DOIT_UNLOCKED)) ||
2494	    !tcf_proto_is_unlocked(name)) {
2495		rtnl_held = true;
2496		rtnl_lock();
2497	}
2498
2499	err = __tcf_qdisc_cl_find(q, parent, &cl, t->tcm_ifindex, extack);
2500	if (err)
2501		goto errout;
2502
2503	block = __tcf_block_find(net, q, cl, t->tcm_ifindex, t->tcm_block_index,
2504				 extack);
2505	if (IS_ERR(block)) {
2506		err = PTR_ERR(block);
2507		goto errout;
2508	}
2509
2510	chain_index = nla_get_u32_default(tca[TCA_CHAIN], 0);
2511	if (chain_index > TC_ACT_EXT_VAL_MASK) {
2512		NL_SET_ERR_MSG(extack, "Specified chain index exceeds upper limit");
2513		err = -EINVAL;
2514		goto errout;
2515	}
2516	chain = tcf_chain_get(block, chain_index, false);
2517	if (!chain) {
2518		/* User requested flush on non-existent chain. Nothing to do,
2519		 * so just return success.
2520		 */
2521		if (prio == 0) {
2522			err = 0;
2523			goto errout;
2524		}
2525		NL_SET_ERR_MSG(extack, "Cannot find specified filter chain");
2526		err = -ENOENT;
2527		goto errout;
2528	}
2529
2530	if (prio == 0) {
2531		tfilter_notify_chain(net, skb, block, q, parent, n,
2532				     chain, RTM_DELTFILTER, extack);
2533		tcf_chain_flush(chain, rtnl_held);
2534		err = 0;
2535		goto errout;
2536	}
2537
2538	mutex_lock(&chain->filter_chain_lock);
2539	tp = tcf_chain_tp_find(chain, &chain_info, protocol,
2540			       prio, false, extack);
2541	if (!tp) {
2542		err = -ENOENT;
2543		NL_SET_ERR_MSG(extack, "Filter with specified priority/protocol not found");
2544		goto errout_locked;
2545	} else if (IS_ERR(tp)) {
2546		err = PTR_ERR(tp);
2547		goto errout_locked;
2548	} else if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], tp->ops->kind)) {
2549		NL_SET_ERR_MSG(extack, "Specified filter kind does not match existing one");
2550		err = -EINVAL;
2551		goto errout_locked;
2552	} else if (t->tcm_handle == 0) {
2553		tcf_proto_signal_destroying(chain, tp);
2554		tcf_chain_tp_remove(chain, &chain_info, tp);
2555		mutex_unlock(&chain->filter_chain_lock);
2556
2557		tcf_proto_put(tp, rtnl_held, NULL);
2558		tfilter_notify(net, skb, n, tp, block, q, parent, fh,
2559			       RTM_DELTFILTER, false, rtnl_held, extack);
2560		err = 0;
2561		goto errout;
2562	}
2563	mutex_unlock(&chain->filter_chain_lock);
2564
2565	fh = tp->ops->get(tp, t->tcm_handle);
2566
2567	if (!fh) {
2568		NL_SET_ERR_MSG(extack, "Specified filter handle not found");
2569		err = -ENOENT;
2570	} else {
2571		bool last;
2572
2573		err = tfilter_del_notify(net, skb, n, tp, block, q, parent, fh,
2574					 &last, rtnl_held, extack);
2575
2576		if (err)
2577			goto errout;
2578		if (last)
2579			tcf_chain_tp_delete_empty(chain, tp, rtnl_held, extack);
2580	}
2581
2582errout:
2583	if (chain) {
2584		if (tp && !IS_ERR(tp))
2585			tcf_proto_put(tp, rtnl_held, NULL);
2586		tcf_chain_put(chain);
2587	}
2588	tcf_block_release(q, block, rtnl_held);
2589
2590	if (rtnl_held)
2591		rtnl_unlock();
2592
2593	return err;
2594
2595errout_locked:
2596	mutex_unlock(&chain->filter_chain_lock);
2597	goto errout;
2598}
2599
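/* Get a single filter (RTM_GETTFILTER) and unicast an RTM_NEWTFILTER
 * reply to the requesting socket.
 */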
2600static int tc_get_tfilter(struct sk_buff *skb, struct nlmsghdr *n,
2601			  struct netlink_ext_ack *extack)
2602{
2603	struct net *net = sock_net(skb->sk);
2604	struct nlattr *tca[TCA_MAX + 1];
2605	char name[IFNAMSIZ];
2606	struct tcmsg *t;
2607	u32 protocol;
2608	u32 prio;
2609	u32 parent;
2610	u32 chain_index;
2611	struct Qdisc *q = NULL;
2612	struct tcf_chain_info chain_info;
2613	struct tcf_chain *chain = NULL;
2614	struct tcf_block *block = NULL;
2615	struct tcf_proto *tp = NULL;
2616	unsigned long cl = 0;
2617	void *fh = NULL;
2618	int err;
2619	bool rtnl_held = false;
2620
2621	err = nlmsg_parse_deprecated(n, sizeof(*t), tca, TCA_MAX,
2622				     rtm_tca_policy, extack);
2623	if (err < 0)
2624		return err;
2625
2626	t = nlmsg_data(n);
2627	protocol = TC_H_MIN(t->tcm_info);
2628	prio = TC_H_MAJ(t->tcm_info);
2629	parent = t->tcm_parent;
2630
2631	if (prio == 0) {
2632		NL_SET_ERR_MSG(extack, "Invalid filter command with priority of zero");
2633		return -ENOENT;
2634	}
2635
2636	/* Find head of filter chain. */
2637
2638	err = __tcf_qdisc_find(net, &q, &parent, t->tcm_ifindex, false, extack);
2639	if (err)
2640		return err;
2641
2642	if (tcf_proto_check_kind(tca[TCA_KIND], name)) {
2643		NL_SET_ERR_MSG(extack, "Specified TC filter name too long");
2644		err = -EINVAL;
2645		goto errout;
2646	}
2647	/* Take the rtnl mutex if the block is shared (no qdisc found), the
2648	 * qdisc is not unlocked, or the classifier type is unspecified or
2649	 * not unlocked.
2650	 */
2651	if ((q && !(q->ops->cl_ops->flags & QDISC_CLASS_OPS_DOIT_UNLOCKED)) ||
2652	    !tcf_proto_is_unlocked(name)) {
2653		rtnl_held = true;
2654		rtnl_lock();
2655	}
2656
2657	err = __tcf_qdisc_cl_find(q, parent, &cl, t->tcm_ifindex, extack);
2658	if (err)
2659		goto errout;
2660
2661	block = __tcf_block_find(net, q, cl, t->tcm_ifindex, t->tcm_block_index,
2662				 extack);
2663	if (IS_ERR(block)) {
2664		err = PTR_ERR(block);
2665		goto errout;
2666	}
2667
2668	chain_index = nla_get_u32_default(tca[TCA_CHAIN], 0);
2669	if (chain_index > TC_ACT_EXT_VAL_MASK) {
2670		NL_SET_ERR_MSG(extack, "Specified chain index exceeds upper limit");
2671		err = -EINVAL;
2672		goto errout;
2673	}
2674	chain = tcf_chain_get(block, chain_index, false);
2675	if (!chain) {
2676		NL_SET_ERR_MSG(extack, "Cannot find specified filter chain");
2677		err = -EINVAL;
2678		goto errout;
2679	}
2680
2681	mutex_lock(&chain->filter_chain_lock);
2682	tp = tcf_chain_tp_find(chain, &chain_info, protocol,
2683			       prio, false, extack);
2684	mutex_unlock(&chain->filter_chain_lock);
2685	if (!tp) {
2686		err = -ENOENT;
2687		NL_SET_ERR_MSG(extack, "Filter with specified priority/protocol not found");
2688		goto errout;
2689	} else if (IS_ERR(tp)) {
2690		err = PTR_ERR(tp);
2691		goto errout;
2692	} else if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], tp->ops->kind)) {
2693		NL_SET_ERR_MSG(extack, "Specified filter kind does not match existing one");
2694		err = -EINVAL;
2695		goto errout;
2696	}
2697
2698	fh = tp->ops->get(tp, t->tcm_handle);
2699
2700	if (!fh) {
2701		NL_SET_ERR_MSG(extack, "Specified filter handle not found");
2702		err = -ENOENT;
2703	} else {
2704		err = tfilter_notify(net, skb, n, tp, block, q, parent,
2705				     fh, RTM_NEWTFILTER, true, rtnl_held, NULL);
2706		if (err < 0)
2707			NL_SET_ERR_MSG(extack, "Failed to send filter notify message");
2708	}
2709
2710	tfilter_put(tp, fh);
2711errout:
2712	if (chain) {
2713		if (tp && !IS_ERR(tp))
2714			tcf_proto_put(tp, rtnl_held, NULL);
2715		tcf_chain_put(chain);
2716	}
2717	tcf_block_release(q, block, rtnl_held);
2718
2719	if (rtnl_held)
2720		rtnl_unlock();
2721
2722	return err;
2723}
2724
2725struct tcf_dump_args {
2726	struct tcf_walker w;
2727	struct sk_buff *skb;
2728	struct netlink_callback *cb;
2729	struct tcf_block *block;
2730	struct Qdisc *q;
2731	u32 parent;
2732	bool terse_dump;
2733};
2734
2735static int tcf_node_dump(struct tcf_proto *tp, void *n, struct tcf_walker *arg)
2736{
2737	struct tcf_dump_args *a = (void *)arg;
2738	struct net *net = sock_net(a->skb->sk);
2739
2740	return tcf_fill_node(net, a->skb, tp, a->block, a->q, a->parent,
2741			     n, NETLINK_CB(a->cb->skb).portid,
2742			     a->cb->nlh->nlmsg_seq, NLM_F_MULTI,
2743			     RTM_NEWTFILTER, a->terse_dump, true, NULL);
2744}
2745
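/* Dump all filters of one chain into @skb, using cb->args[] to resume an
 * interrupted dump. Returns false when @skb ran out of room.
 */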
2746static bool tcf_chain_dump(struct tcf_chain *chain, struct Qdisc *q, u32 parent,
2747			   struct sk_buff *skb, struct netlink_callback *cb,
2748			   long index_start, long *p_index, bool terse)
2749{
2750	struct net *net = sock_net(skb->sk);
2751	struct tcf_block *block = chain->block;
2752	struct tcmsg *tcm = nlmsg_data(cb->nlh);
2753	struct tcf_proto *tp, *tp_prev;
2754	struct tcf_dump_args arg;
2755
2756	for (tp = __tcf_get_next_proto(chain, NULL);
2757	     tp;
2758	     tp_prev = tp,
2759		     tp = __tcf_get_next_proto(chain, tp),
2760		     tcf_proto_put(tp_prev, true, NULL),
2761		     (*p_index)++) {
2762		if (*p_index < index_start)
2763			continue;
2764		if (TC_H_MAJ(tcm->tcm_info) &&
2765		    TC_H_MAJ(tcm->tcm_info) != tp->prio)
2766			continue;
2767		if (TC_H_MIN(tcm->tcm_info) &&
2768		    TC_H_MIN(tcm->tcm_info) != tp->protocol)
2769			continue;
2770		if (*p_index > index_start)
2771			memset(&cb->args[1], 0,
2772			       sizeof(cb->args) - sizeof(cb->args[0]));
2773		if (cb->args[1] == 0) {
2774			if (tcf_fill_node(net, skb, tp, block, q, parent, NULL,
2775					  NETLINK_CB(cb->skb).portid,
2776					  cb->nlh->nlmsg_seq, NLM_F_MULTI,
2777					  RTM_NEWTFILTER, false, true, NULL) <= 0)
2778				goto errout;
2779			cb->args[1] = 1;
2780		}
2781		if (!tp->ops->walk)
2782			continue;
2783		arg.w.fn = tcf_node_dump;
2784		arg.skb = skb;
2785		arg.cb = cb;
2786		arg.block = block;
2787		arg.q = q;
2788		arg.parent = parent;
2789		arg.w.stop = 0;
2790		arg.w.skip = cb->args[1] - 1;
2791		arg.w.count = 0;
2792		arg.w.cookie = cb->args[2];
2793		arg.terse_dump = terse;
2794		tp->ops->walk(tp, &arg.w, true);
2795		cb->args[2] = arg.w.cookie;
2796		cb->args[1] = arg.w.count + 1;
2797		if (arg.w.stop)
2798			goto errout;
2799	}
2800	return true;
2801
2802errout:
2803	tcf_proto_put(tp, true, NULL);
2804	return false;
2805}
2806
2807static const struct nla_policy tcf_tfilter_dump_policy[TCA_MAX + 1] = {
2808	[TCA_CHAIN]      = { .type = NLA_U32 },
2809	[TCA_DUMP_FLAGS] = NLA_POLICY_BITFIELD32(TCA_DUMP_FLAGS_TERSE),
2810};
2811
2812/* called with RTNL */
2813static int tc_dump_tfilter(struct sk_buff *skb, struct netlink_callback *cb)
2814{
2815	struct tcf_chain *chain, *chain_prev;
2816	struct net *net = sock_net(skb->sk);
2817	struct nlattr *tca[TCA_MAX + 1];
2818	struct Qdisc *q = NULL;
2819	struct tcf_block *block;
2820	struct tcmsg *tcm = nlmsg_data(cb->nlh);
2821	bool terse_dump = false;
2822	long index_start;
2823	long index;
2824	u32 parent;
2825	int err;
2826
2827	if (nlmsg_len(cb->nlh) < sizeof(*tcm))
2828		return skb->len;
2829
2830	err = nlmsg_parse_deprecated(cb->nlh, sizeof(*tcm), tca, TCA_MAX,
2831				     tcf_tfilter_dump_policy, cb->extack);
2832	if (err)
2833		return err;
2834
2835	if (tca[TCA_DUMP_FLAGS]) {
2836		struct nla_bitfield32 flags =
2837			nla_get_bitfield32(tca[TCA_DUMP_FLAGS]);
2838
2839		terse_dump = flags.value & TCA_DUMP_FLAGS_TERSE;
2840	}
2841
2842	if (tcm->tcm_ifindex == TCM_IFINDEX_MAGIC_BLOCK) {
2843		block = tcf_block_refcnt_get(net, tcm->tcm_block_index);
2844		if (!block)
2845			goto out;
2846		/* If we work with a block index, q is NULL and the parent
2847		 * value is never used in the following code. The check in
2848		 * tcf_fill_node prevents it. However, the compiler does not
2849		 * see that far, so set parent to zero to silence the warning
2850		 * about parent being uninitialized.
2851		 */
2852		parent = 0;
2853	} else {
2854		const struct Qdisc_class_ops *cops;
2855		struct net_device *dev;
2856		unsigned long cl = 0;
2857
2858		dev = __dev_get_by_index(net, tcm->tcm_ifindex);
2859		if (!dev)
2860			return skb->len;
2861
2862		parent = tcm->tcm_parent;
2863		if (!parent)
2864			q = rtnl_dereference(dev->qdisc);
2865		else
2866			q = qdisc_lookup(dev, TC_H_MAJ(tcm->tcm_parent));
2867		if (!q)
2868			goto out;
2869		cops = q->ops->cl_ops;
2870		if (!cops)
2871			goto out;
2872		if (!cops->tcf_block)
2873			goto out;
2874		if (TC_H_MIN(tcm->tcm_parent)) {
2875			cl = cops->find(q, tcm->tcm_parent);
2876			if (cl == 0)
2877				goto out;
2878		}
2879		block = cops->tcf_block(q, cl, NULL);
2880		if (!block)
2881			goto out;
2882		parent = block->classid;
2883		if (tcf_block_shared(block))
2884			q = NULL;
2885	}
2886
2887	index_start = cb->args[0];
2888	index = 0;
2889
2890	for (chain = __tcf_get_next_chain(block, NULL);
2891	     chain;
2892	     chain_prev = chain,
2893		     chain = __tcf_get_next_chain(block, chain),
2894		     tcf_chain_put(chain_prev)) {
2895		if (tca[TCA_CHAIN] &&
2896		    nla_get_u32(tca[TCA_CHAIN]) != chain->index)
2897			continue;
2898		if (!tcf_chain_dump(chain, q, parent, skb, cb,
2899				    index_start, &index, terse_dump)) {
2900			tcf_chain_put(chain);
2901			err = -EMSGSIZE;
2902			break;
2903		}
2904	}
2905
2906	if (tcm->tcm_ifindex == TCM_IFINDEX_MAGIC_BLOCK)
2907		tcf_block_refcnt_put(block, true);
2908	cb->args[0] = index;
2909
2910out:
2911	/* If we made no progress, the error (EMSGSIZE) is real */
2912	if (skb->len == 0 && err)
2913		return err;
2914	return skb->len;
2915}
2916
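/* Fill @skb with a single chain event message, including the template
 * kind and parameters when a template is set on the chain.
 */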
2917static int tc_chain_fill_node(const struct tcf_proto_ops *tmplt_ops,
2918			      void *tmplt_priv, u32 chain_index,
2919			      struct net *net, struct sk_buff *skb,
2920			      struct tcf_block *block,
2921			      u32 portid, u32 seq, u16 flags, int event,
2922			      struct netlink_ext_ack *extack)
2923{
2924	unsigned char *b = skb_tail_pointer(skb);
2925	const struct tcf_proto_ops *ops;
2926	struct nlmsghdr *nlh;
2927	struct tcmsg *tcm;
2928	void *priv;
2929
2930	ops = tmplt_ops;
2931	priv = tmplt_priv;
2932
2933	nlh = nlmsg_put(skb, portid, seq, event, sizeof(*tcm), flags);
2934	if (!nlh)
2935		goto out_nlmsg_trim;
2936	tcm = nlmsg_data(nlh);
2937	tcm->tcm_family = AF_UNSPEC;
2938	tcm->tcm__pad1 = 0;
2939	tcm->tcm__pad2 = 0;
2940	tcm->tcm_handle = 0;
2941	if (block->q) {
2942		tcm->tcm_ifindex = qdisc_dev(block->q)->ifindex;
2943		tcm->tcm_parent = block->q->handle;
2944	} else {
2945		tcm->tcm_ifindex = TCM_IFINDEX_MAGIC_BLOCK;
2946		tcm->tcm_block_index = block->index;
2947	}
2948
2949	if (nla_put_u32(skb, TCA_CHAIN, chain_index))
2950		goto nla_put_failure;
2951
2952	if (ops) {
2953		if (nla_put_string(skb, TCA_KIND, ops->kind))
2954			goto nla_put_failure;
2955		if (ops->tmplt_dump(skb, net, priv) < 0)
2956			goto nla_put_failure;
2957	}
2958
2959	if (extack && extack->_msg &&
2960	    nla_put_string(skb, TCA_EXT_WARN_MSG, extack->_msg))
2961		goto out_nlmsg_trim;
2962
2963	nlh->nlmsg_len = skb_tail_pointer(skb) - b;
2964
2965	return skb->len;
2966
2967out_nlmsg_trim:
2968nla_put_failure:
2969	nlmsg_trim(skb, b);
2970	return -EMSGSIZE;
2971}
2972
2973static int tc_chain_notify(struct tcf_chain *chain, struct sk_buff *oskb,
2974			   u32 seq, u16 flags, int event, bool unicast,
2975			   struct netlink_ext_ack *extack)
2976{
2977	u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
2978	struct tcf_block *block = chain->block;
2979	struct net *net = block->net;
2980	struct sk_buff *skb;
2981	int err = 0;
2982
2983	if (!unicast && !rtnl_notify_needed(net, flags, RTNLGRP_TC))
2984		return 0;
2985
2986	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
2987	if (!skb)
2988		return -ENOBUFS;
2989
2990	if (tc_chain_fill_node(chain->tmplt_ops, chain->tmplt_priv,
2991			       chain->index, net, skb, block, portid,
2992			       seq, flags, event, extack) <= 0) {
2993		kfree_skb(skb);
2994		return -EINVAL;
2995	}
2996
2997	if (unicast)
2998		err = rtnl_unicast(skb, net, portid);
2999	else
3000		err = rtnetlink_send(skb, net, portid, RTNLGRP_TC,
3001				     flags & NLM_F_ECHO);
3002
3003	return err;
3004}
3005
3006static int tc_chain_notify_delete(const struct tcf_proto_ops *tmplt_ops,
3007				  void *tmplt_priv, u32 chain_index,
3008				  struct tcf_block *block, struct sk_buff *oskb,
3009				  u32 seq, u16 flags)
3010{
3011	u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
3012	struct net *net = block->net;
3013	struct sk_buff *skb;
3014
3015	if (!rtnl_notify_needed(net, flags, RTNLGRP_TC))
3016		return 0;
3017
3018	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
3019	if (!skb)
3020		return -ENOBUFS;
3021
3022	if (tc_chain_fill_node(tmplt_ops, tmplt_priv, chain_index, net, skb,
3023			       block, portid, seq, flags, RTM_DELCHAIN, NULL) <= 0) {
3024		kfree_skb(skb);
3025		return -EINVAL;
3026	}
3027
3028	return rtnetlink_send(skb, net, portid, RTNLGRP_TC, flags & NLM_F_ECHO);
3029}
3030
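/* Set a template on @chain: look up the classifier ops by kind and let
 * the classifier create its per-chain template state. Classifiers must
 * implement the tmplt_* ops to support templates.
 */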
3031static int tc_chain_tmplt_add(struct tcf_chain *chain, struct net *net,
3032			      struct nlattr **tca,
3033			      struct netlink_ext_ack *extack)
3034{
3035	const struct tcf_proto_ops *ops;
3036	char name[IFNAMSIZ];
3037	void *tmplt_priv;
3038
3039	/* If kind is not set, the user did not specify a template. */
3040	if (!tca[TCA_KIND])
3041		return 0;
3042
3043	if (tcf_proto_check_kind(tca[TCA_KIND], name)) {
3044		NL_SET_ERR_MSG(extack, "Specified TC chain template name too long");
3045		return -EINVAL;
3046	}
3047
3048	ops = tcf_proto_lookup_ops(name, true, extack);
3049	if (IS_ERR(ops))
3050		return PTR_ERR(ops);
3051	if (!ops->tmplt_create || !ops->tmplt_destroy || !ops->tmplt_dump ||
3052	    !ops->tmplt_reoffload) {
3053		NL_SET_ERR_MSG(extack, "Chain templates are not supported with specified classifier");
3054		module_put(ops->owner);
3055		return -EOPNOTSUPP;
3056	}
3057
3058	tmplt_priv = ops->tmplt_create(net, chain, tca, extack);
3059	if (IS_ERR(tmplt_priv)) {
3060		module_put(ops->owner);
3061		return PTR_ERR(tmplt_priv);
3062	}
3063	chain->tmplt_ops = ops;
3064	chain->tmplt_priv = tmplt_priv;
3065	return 0;
3066}
3067
3068static void tc_chain_tmplt_del(const struct tcf_proto_ops *tmplt_ops,
3069			       void *tmplt_priv)
3070{
3071	/* If template ops are not set, there is no template to destroy. */
3072	if (!tmplt_ops)
3073		return;
3074
3075	tmplt_ops->tmplt_destroy(tmplt_priv);
3076	module_put(tmplt_ops->owner);
3077}
3078
3079/* Add/delete/get a chain */
3080
3081static int tc_ctl_chain(struct sk_buff *skb, struct nlmsghdr *n,
3082			struct netlink_ext_ack *extack)
3083{
3084	struct net *net = sock_net(skb->sk);
3085	struct nlattr *tca[TCA_MAX + 1];
3086	struct tcmsg *t;
3087	u32 parent;
3088	u32 chain_index;
3089	struct Qdisc *q;
3090	struct tcf_chain *chain;
3091	struct tcf_block *block;
3092	unsigned long cl;
3093	int err;
3094
3095replay:
3096	q = NULL;
3097	err = nlmsg_parse_deprecated(n, sizeof(*t), tca, TCA_MAX,
3098				     rtm_tca_policy, extack);
3099	if (err < 0)
3100		return err;
3101
3102	t = nlmsg_data(n);
3103	parent = t->tcm_parent;
3104	cl = 0;
3105
3106	block = tcf_block_find(net, &q, &parent, &cl,
3107			       t->tcm_ifindex, t->tcm_block_index, extack);
3108	if (IS_ERR(block))
3109		return PTR_ERR(block);
3110
3111	chain_index = nla_get_u32_default(tca[TCA_CHAIN], 0);
3112	if (chain_index > TC_ACT_EXT_VAL_MASK) {
3113		NL_SET_ERR_MSG(extack, "Specified chain index exceeds upper limit");
3114		err = -EINVAL;
3115		goto errout_block;
3116	}
3117
3118	mutex_lock(&block->lock);
3119	chain = tcf_chain_lookup(block, chain_index);
3120	if (n->nlmsg_type == RTM_NEWCHAIN) {
3121		if (chain) {
3122			if (tcf_chain_held_by_acts_only(chain)) {
3123				/* The chain exists only because there is
3124				 * some action referencing it.
3125				 */
3126				tcf_chain_hold(chain);
3127			} else {
3128				NL_SET_ERR_MSG(extack, "Filter chain already exists");
3129				err = -EEXIST;
3130				goto errout_block_locked;
3131			}
3132		} else {
3133			if (!(n->nlmsg_flags & NLM_F_CREATE)) {
3134				NL_SET_ERR_MSG(extack, "Need both RTM_NEWCHAIN and NLM_F_CREATE to create a new chain");
3135				err = -ENOENT;
3136				goto errout_block_locked;
3137			}
3138			chain = tcf_chain_create(block, chain_index);
3139			if (!chain) {
3140				NL_SET_ERR_MSG(extack, "Failed to create filter chain");
3141				err = -ENOMEM;
3142				goto errout_block_locked;
3143			}
3144		}
3145	} else {
3146		if (!chain || tcf_chain_held_by_acts_only(chain)) {
3147			NL_SET_ERR_MSG(extack, "Cannot find specified filter chain");
3148			err = -EINVAL;
3149			goto errout_block_locked;
3150		}
3151		tcf_chain_hold(chain);
3152	}
3153
3154	if (n->nlmsg_type == RTM_NEWCHAIN) {
3155		/* Modifying chain requires holding parent block lock. In case
3156		 * the chain was successfully added, take a reference to the
3157		 * chain. This ensures that an empty chain does not disappear at
3158		 * the end of this function.
3159		 */
3160		tcf_chain_hold(chain);
3161		chain->explicitly_created = true;
3162	}
3163	mutex_unlock(&block->lock);
3164
3165	switch (n->nlmsg_type) {
3166	case RTM_NEWCHAIN:
3167		err = tc_chain_tmplt_add(chain, net, tca, extack);
3168		if (err) {
3169			tcf_chain_put_explicitly_created(chain);
3170			goto errout;
3171		}
3172
3173		tc_chain_notify(chain, NULL, 0, NLM_F_CREATE | NLM_F_EXCL,
3174				RTM_NEWCHAIN, false, extack);
3175		break;
3176	case RTM_DELCHAIN:
3177		tfilter_notify_chain(net, skb, block, q, parent, n,
3178				     chain, RTM_DELTFILTER, extack);
3179		/* Flush the chain first as the user requested chain removal. */
3180		tcf_chain_flush(chain, true);
3181		/* In case the chain was successfully deleted, put a reference
3182		 * to the chain previously taken during addition.
3183		 */
3184		tcf_chain_put_explicitly_created(chain);
3185		break;
3186	case RTM_GETCHAIN:
3187		err = tc_chain_notify(chain, skb, n->nlmsg_seq,
3188				      n->nlmsg_flags, n->nlmsg_type, true, extack);
3189		if (err < 0)
3190			NL_SET_ERR_MSG(extack, "Failed to send chain notify message");
3191		break;
3192	default:
3193		err = -EOPNOTSUPP;
3194		NL_SET_ERR_MSG(extack, "Unsupported message type");
3195		goto errout;
3196	}
3197
3198errout:
3199	tcf_chain_put(chain);
3200errout_block:
3201	tcf_block_release(q, block, true);
3202	if (err == -EAGAIN)
3203		/* Replay the request. */
3204		goto replay;
3205	return err;
3206
3207errout_block_locked:
3208	mutex_unlock(&block->lock);
3209	goto errout_block;
3210}
3211
3212/* called with RTNL */
3213static int tc_dump_chain(struct sk_buff *skb, struct netlink_callback *cb)
3214{
3215	struct net *net = sock_net(skb->sk);
3216	struct nlattr *tca[TCA_MAX + 1];
3217	struct Qdisc *q = NULL;
3218	struct tcf_block *block;
3219	struct tcmsg *tcm = nlmsg_data(cb->nlh);
3220	struct tcf_chain *chain;
3221	long index_start;
3222	long index;
3223	int err;
3224
3225	if (nlmsg_len(cb->nlh) < sizeof(*tcm))
3226		return skb->len;
3227
3228	err = nlmsg_parse_deprecated(cb->nlh, sizeof(*tcm), tca, TCA_MAX,
3229				     rtm_tca_policy, cb->extack);
3230	if (err)
3231		return err;
3232
3233	if (tcm->tcm_ifindex == TCM_IFINDEX_MAGIC_BLOCK) {
3234		block = tcf_block_refcnt_get(net, tcm->tcm_block_index);
3235		if (!block)
3236			goto out;
3237	} else {
3238		const struct Qdisc_class_ops *cops;
3239		struct net_device *dev;
3240		unsigned long cl = 0;
3241
3242		dev = __dev_get_by_index(net, tcm->tcm_ifindex);
3243		if (!dev)
3244			return skb->len;
3245
3246		if (!tcm->tcm_parent)
3247			q = rtnl_dereference(dev->qdisc);
3248		else
3249			q = qdisc_lookup(dev, TC_H_MAJ(tcm->tcm_parent));
3250
3251		if (!q)
3252			goto out;
3253		cops = q->ops->cl_ops;
3254		if (!cops)
3255			goto out;
3256		if (!cops->tcf_block)
3257			goto out;
3258		if (TC_H_MIN(tcm->tcm_parent)) {
3259			cl = cops->find(q, tcm->tcm_parent);
3260			if (cl == 0)
3261				goto out;
3262		}
3263		block = cops->tcf_block(q, cl, NULL);
3264		if (!block)
3265			goto out;
3266		if (tcf_block_shared(block))
3267			q = NULL;
3268	}
3269
3270	index_start = cb->args[0];
3271	index = 0;
3272
3273	mutex_lock(&block->lock);
3274	list_for_each_entry(chain, &block->chain_list, list) {
3275		if ((tca[TCA_CHAIN] &&
3276		     nla_get_u32(tca[TCA_CHAIN]) != chain->index))
3277			continue;
3278		if (index < index_start) {
3279			index++;
3280			continue;
3281		}
3282		if (tcf_chain_held_by_acts_only(chain))
3283			continue;
3284		err = tc_chain_fill_node(chain->tmplt_ops, chain->tmplt_priv,
3285					 chain->index, net, skb, block,
3286					 NETLINK_CB(cb->skb).portid,
3287					 cb->nlh->nlmsg_seq, NLM_F_MULTI,
3288					 RTM_NEWCHAIN, NULL);
3289		if (err <= 0)
3290			break;
3291		index++;
3292	}
3293	mutex_unlock(&block->lock);
3294
3295	if (tcm->tcm_ifindex == TCM_IFINDEX_MAGIC_BLOCK)
3296		tcf_block_refcnt_put(block, true);
3297	cb->args[0] = index;
3298
3299out:
3300	/* If we made no progress, the error (EMSGSIZE) is real */
3301	if (skb->len == 0 && err)
3302		return err;
3303	return skb->len;
3304}
3305
3306int tcf_exts_init_ex(struct tcf_exts *exts, struct net *net, int action,
3307		     int police, struct tcf_proto *tp, u32 handle,
3308		     bool use_action_miss)
3309{
3310	int err = 0;
3311
3312#ifdef CONFIG_NET_CLS_ACT
3313	exts->type = 0;
3314	exts->nr_actions = 0;
3315	exts->miss_cookie_node = NULL;
3316	/* Note: we do not yet own a reference on net.
3317	 * This reference might be taken later from tcf_exts_get_net().
3318	 */
3319	exts->net = net;
3320	exts->actions = kcalloc(TCA_ACT_MAX_PRIO, sizeof(struct tc_action *),
3321				GFP_KERNEL);
3322	if (!exts->actions)
3323		return -ENOMEM;
3324#endif
3325
3326	exts->action = action;
3327	exts->police = police;
3328
3329	if (!use_action_miss)
3330		return 0;
3331
3332	err = tcf_exts_miss_cookie_base_alloc(exts, tp, handle);
3333	if (err)
3334		goto err_miss_alloc;
3335
3336	return 0;
3337
3338err_miss_alloc:
3339	tcf_exts_destroy(exts);
3340#ifdef CONFIG_NET_CLS_ACT
3341	exts->actions = NULL;
3342#endif
3343	return err;
3344}
3345EXPORT_SYMBOL(tcf_exts_init_ex);
3346
3347void tcf_exts_destroy(struct tcf_exts *exts)
3348{
3349	tcf_exts_miss_cookie_base_destroy(exts);
3350
3351#ifdef CONFIG_NET_CLS_ACT
3352	if (exts->actions) {
3353		tcf_action_destroy(exts->actions, TCA_ACT_UNBIND);
3354		kfree(exts->actions);
3355	}
3356	exts->nr_actions = 0;
3357#endif
3358}
3359EXPORT_SYMBOL(tcf_exts_destroy);
3360
3361int tcf_exts_validate_ex(struct net *net, struct tcf_proto *tp, struct nlattr **tb,
3362			 struct nlattr *rate_tlv, struct tcf_exts *exts,
3363			 u32 flags, u32 fl_flags, struct netlink_ext_ack *extack)
3364{
3365#ifdef CONFIG_NET_CLS_ACT
3366	{
3367		int init_res[TCA_ACT_MAX_PRIO] = {};
3368		struct tc_action *act;
3369		size_t attr_size = 0;
3370
3371		if (exts->police && tb[exts->police]) {
3372			struct tc_action_ops *a_o;
3373
3374			flags |= TCA_ACT_FLAGS_POLICE | TCA_ACT_FLAGS_BIND;
3375			a_o = tc_action_load_ops(tb[exts->police], flags,
3376						 extack);
3377			if (IS_ERR(a_o))
3378				return PTR_ERR(a_o);
3379			act = tcf_action_init_1(net, tp, tb[exts->police],
3380						rate_tlv, a_o, init_res, flags,
3381						extack);
3382			module_put(a_o->owner);
3383			if (IS_ERR(act))
3384				return PTR_ERR(act);
3385
3386			act->type = exts->type = TCA_OLD_COMPAT;
3387			exts->actions[0] = act;
3388			exts->nr_actions = 1;
3389			tcf_idr_insert_many(exts->actions, init_res);
3390		} else if (exts->action && tb[exts->action]) {
3391			int err;
3392
3393			flags |= TCA_ACT_FLAGS_BIND;
3394			err = tcf_action_init(net, tp, tb[exts->action],
3395					      rate_tlv, exts->actions, init_res,
3396					      &attr_size, flags, fl_flags,
3397					      extack);
3398			if (err < 0)
3399				return err;
3400			exts->nr_actions = err;
3401		}
3402	}
3403#else
3404	if ((exts->action && tb[exts->action]) ||
3405	    (exts->police && tb[exts->police])) {
3406		NL_SET_ERR_MSG(extack, "Classifier actions are not supported per compile options (CONFIG_NET_CLS_ACT)");
3407		return -EOPNOTSUPP;
3408	}
3409#endif
3410
3411	return 0;
3412}
3413EXPORT_SYMBOL(tcf_exts_validate_ex);
3414
3415int tcf_exts_validate(struct net *net, struct tcf_proto *tp, struct nlattr **tb,
3416		      struct nlattr *rate_tlv, struct tcf_exts *exts,
3417		      u32 flags, struct netlink_ext_ack *extack)
3418{
3419	return tcf_exts_validate_ex(net, tp, tb, rate_tlv, exts,
3420				    flags, 0, extack);
3421}
3422EXPORT_SYMBOL(tcf_exts_validate);
3423
3424void tcf_exts_change(struct tcf_exts *dst, struct tcf_exts *src)
3425{
3426#ifdef CONFIG_NET_CLS_ACT
3427	struct tcf_exts old = *dst;
3428
3429	*dst = *src;
3430	tcf_exts_destroy(&old);
3431#endif
3432}
3433EXPORT_SYMBOL(tcf_exts_change);
3434
3435#ifdef CONFIG_NET_CLS_ACT
3436static struct tc_action *tcf_exts_first_act(struct tcf_exts *exts)
3437{
3438	if (exts->nr_actions == 0)
3439		return NULL;
3440	else
3441		return exts->actions[0];
3442}
3443#endif
3444
3445int tcf_exts_dump(struct sk_buff *skb, struct tcf_exts *exts)
3446{
3447#ifdef CONFIG_NET_CLS_ACT
3448	struct nlattr *nest;
3449
3450	if (exts->action && tcf_exts_has_actions(exts)) {
3451		/*
3452		 * Again for backward-compatible mode - we want
3453		 * to work with both old and new modes of entering
3454		 * tc data even if iproute2 was newer - jhs
3455		 */
3456		if (exts->type != TCA_OLD_COMPAT) {
3457			nest = nla_nest_start_noflag(skb, exts->action);
3458			if (nest == NULL)
3459				goto nla_put_failure;
3460
3461			if (tcf_action_dump(skb, exts->actions, 0, 0, false)
3462			    < 0)
3463				goto nla_put_failure;
3464			nla_nest_end(skb, nest);
3465		} else if (exts->police) {
3466			struct tc_action *act = tcf_exts_first_act(exts);
3467			nest = nla_nest_start_noflag(skb, exts->police);
3468			if (nest == NULL || !act)
3469				goto nla_put_failure;
3470			if (tcf_action_dump_old(skb, act, 0, 0) < 0)
3471				goto nla_put_failure;
3472			nla_nest_end(skb, nest);
3473		}
3474	}
3475	return 0;
3476
3477nla_put_failure:
3478	nla_nest_cancel(skb, nest);
3479	return -1;
3480#else
3481	return 0;
3482#endif
3483}
3484EXPORT_SYMBOL(tcf_exts_dump);
3485
3486int tcf_exts_terse_dump(struct sk_buff *skb, struct tcf_exts *exts)
3487{
3488#ifdef CONFIG_NET_CLS_ACT
3489	struct nlattr *nest;
3490
3491	if (!exts->action || !tcf_exts_has_actions(exts))
3492		return 0;
3493
3494	nest = nla_nest_start_noflag(skb, exts->action);
3495	if (!nest)
3496		goto nla_put_failure;
3497
3498	if (tcf_action_dump(skb, exts->actions, 0, 0, true) < 0)
3499		goto nla_put_failure;
3500	nla_nest_end(skb, nest);
3501	return 0;
3502
3503nla_put_failure:
3504	nla_nest_cancel(skb, nest);
3505	return -1;
3506#else
3507	return 0;
3508#endif
3509}
3510EXPORT_SYMBOL(tcf_exts_terse_dump);
3511
3512int tcf_exts_dump_stats(struct sk_buff *skb, struct tcf_exts *exts)
3513{
3514#ifdef CONFIG_NET_CLS_ACT
3515	struct tc_action *a = tcf_exts_first_act(exts);
3516	if (a != NULL && tcf_action_copy_stats(skb, a, 1) < 0)
3517		return -1;
3518#endif
3519	return 0;
3520}
3521EXPORT_SYMBOL(tcf_exts_dump_stats);
3522
3523static void tcf_block_offload_inc(struct tcf_block *block, u32 *flags)
3524{
3525	if (*flags & TCA_CLS_FLAGS_IN_HW)
3526		return;
3527	*flags |= TCA_CLS_FLAGS_IN_HW;
3528	atomic_inc(&block->offloadcnt);
3529}
3530
3531static void tcf_block_offload_dec(struct tcf_block *block, u32 *flags)
3532{
3533	if (!(*flags & TCA_CLS_FLAGS_IN_HW))
3534		return;
3535	*flags &= ~TCA_CLS_FLAGS_IN_HW;
3536	atomic_dec(&block->offloadcnt);
3537}
3538
3539static void tc_cls_offload_cnt_update(struct tcf_block *block,
3540				      struct tcf_proto *tp, u32 *cnt,
3541				      u32 *flags, u32 diff, bool add)
3542{
3543	lockdep_assert_held(&block->cb_lock);
3544
3545	spin_lock(&tp->lock);
3546	if (add) {
3547		if (!*cnt)
3548			tcf_block_offload_inc(block, flags);
3549		*cnt += diff;
3550	} else {
3551		*cnt -= diff;
3552		if (!*cnt)
3553			tcf_block_offload_dec(block, flags);
3554	}
3555	spin_unlock(&tp->lock);
3556}
3557
3558static void
3559tc_cls_offload_cnt_reset(struct tcf_block *block, struct tcf_proto *tp,
3560			 u32 *cnt, u32 *flags)
3561{
3562	lockdep_assert_held(&block->cb_lock);
3563
3564	spin_lock(&tp->lock);
3565	tcf_block_offload_dec(block, flags);
3566	*cnt = 0;
3567	spin_unlock(&tp->lock);
3568}
3569
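/* Call all block callbacks registered for @block with an offload request.
 * Returns the number of callbacks that succeeded, or the first error when
 * @err_stop is set.
 */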
3570static int
3571__tc_setup_cb_call(struct tcf_block *block, enum tc_setup_type type,
3572		   void *type_data, bool err_stop)
3573{
3574	struct flow_block_cb *block_cb;
3575	int ok_count = 0;
3576	int err;
3577
3578	list_for_each_entry(block_cb, &block->flow_block.cb_list, list) {
3579		err = block_cb->cb(type, type_data, block_cb->cb_priv);
3580		if (err) {
3581			if (err_stop)
3582				return err;
3583		} else {
3584			ok_count++;
3585		}
3586	}
3587	return ok_count;
3588}
3589
3590int tc_setup_cb_call(struct tcf_block *block, enum tc_setup_type type,
3591		     void *type_data, bool err_stop, bool rtnl_held)
3592{
3593	bool take_rtnl = READ_ONCE(block->lockeddevcnt) && !rtnl_held;
3594	int ok_count;
3595
3596retry:
3597	if (take_rtnl)
3598		rtnl_lock();
3599	down_read(&block->cb_lock);
3600	/* Need to obtain the rtnl lock if the block is bound to devs that
3601	 * require it. In block bind code cb_lock is obtained while holding
3602	 * rtnl, so we must obtain the locks in the same order here.
3603	 */
3604	if (!rtnl_held && !take_rtnl && block->lockeddevcnt) {
3605		up_read(&block->cb_lock);
3606		take_rtnl = true;
3607		goto retry;
3608	}
3609
3610	ok_count = __tc_setup_cb_call(block, type, type_data, err_stop);
3611
3612	up_read(&block->cb_lock);
3613	if (take_rtnl)
3614		rtnl_unlock();
3615	return ok_count;
3616}
3617EXPORT_SYMBOL(tc_setup_cb_call);
3618
3619/* Non-destructive filter add. If a filter that wasn't already in hardware
3620 * is successfully offloaded, increment the block's offload counter. On
3621 * failure, the previously offloaded filter is considered intact and the
3622 * offload counter is not decremented.
3623 */
3624
3625int tc_setup_cb_add(struct tcf_block *block, struct tcf_proto *tp,
3626		    enum tc_setup_type type, void *type_data, bool err_stop,
3627		    u32 *flags, unsigned int *in_hw_count, bool rtnl_held)
3628{
3629	bool take_rtnl = READ_ONCE(block->lockeddevcnt) && !rtnl_held;
3630	int ok_count;
3631
3632retry:
3633	if (take_rtnl)
3634		rtnl_lock();
3635	down_read(&block->cb_lock);
3636	/* Need to obtain the rtnl lock if the block is bound to devs that
3637	 * require it. In block bind code cb_lock is obtained while holding
3638	 * rtnl, so we must obtain the locks in the same order here.
3639	 */
3640	if (!rtnl_held && !take_rtnl && block->lockeddevcnt) {
3641		up_read(&block->cb_lock);
3642		take_rtnl = true;
3643		goto retry;
3644	}
3645
3646	/* Make sure all netdevs sharing this block are offload-capable. */
3647	if (block->nooffloaddevcnt && err_stop) {
3648		ok_count = -EOPNOTSUPP;
3649		goto err_unlock;
3650	}
3651
3652	ok_count = __tc_setup_cb_call(block, type, type_data, err_stop);
3653	if (ok_count < 0)
3654		goto err_unlock;
3655
3656	if (tp->ops->hw_add)
3657		tp->ops->hw_add(tp, type_data);
3658	if (ok_count > 0)
3659		tc_cls_offload_cnt_update(block, tp, in_hw_count, flags,
3660					  ok_count, true);
3661err_unlock:
3662	up_read(&block->cb_lock);
3663	if (take_rtnl)
3664		rtnl_unlock();
3665	return min(ok_count, 0);
3666}
3667EXPORT_SYMBOL(tc_setup_cb_add);
3668
3669/* Destructive filter replace. If a filter that wasn't already in hardware
3670 * is successfully offloaded, increment the block's offload counter. On
3671 * failure, the previously offloaded filter is considered destroyed and the
3672 * offload counter is decremented.
3673 */
3674
3675int tc_setup_cb_replace(struct tcf_block *block, struct tcf_proto *tp,
3676			enum tc_setup_type type, void *type_data, bool err_stop,
3677			u32 *old_flags, unsigned int *old_in_hw_count,
3678			u32 *new_flags, unsigned int *new_in_hw_count,
3679			bool rtnl_held)
3680{
3681	bool take_rtnl = READ_ONCE(block->lockeddevcnt) && !rtnl_held;
3682	int ok_count;
3683
3684retry:
3685	if (take_rtnl)
3686		rtnl_lock();
3687	down_read(&block->cb_lock);
3688	/* Need to obtain the rtnl lock if the block is bound to devs that
3689	 * require it. In block bind code cb_lock is obtained while holding
3690	 * rtnl, so we must obtain the locks in the same order here.
3691	 */
3692	if (!rtnl_held && !take_rtnl && block->lockeddevcnt) {
3693		up_read(&block->cb_lock);
3694		take_rtnl = true;
3695		goto retry;
3696	}
3697
3698	/* Make sure all netdevs sharing this block are offload-capable. */
3699	if (block->nooffloaddevcnt && err_stop) {
3700		ok_count = -EOPNOTSUPP;
3701		goto err_unlock;
3702	}
3703
3704	tc_cls_offload_cnt_reset(block, tp, old_in_hw_count, old_flags);
3705	if (tp->ops->hw_del)
3706		tp->ops->hw_del(tp, type_data);
3707
3708	ok_count = __tc_setup_cb_call(block, type, type_data, err_stop);
3709	if (ok_count < 0)
3710		goto err_unlock;
3711
3712	if (tp->ops->hw_add)
3713		tp->ops->hw_add(tp, type_data);
3714	if (ok_count > 0)
3715		tc_cls_offload_cnt_update(block, tp, new_in_hw_count,
3716					  new_flags, ok_count, true);
3717err_unlock:
3718	up_read(&block->cb_lock);
3719	if (take_rtnl)
3720		rtnl_unlock();
3721	return min(ok_count, 0);
3722}
3723EXPORT_SYMBOL(tc_setup_cb_replace);
3724
3725/* Destroy the filter and decrement the block's offload counter if the
3726 * filter was previously offloaded.
3727 */
3728
3729int tc_setup_cb_destroy(struct tcf_block *block, struct tcf_proto *tp,
3730			enum tc_setup_type type, void *type_data, bool err_stop,
3731			u32 *flags, unsigned int *in_hw_count, bool rtnl_held)
3732{
3733	bool take_rtnl = READ_ONCE(block->lockeddevcnt) && !rtnl_held;
3734	int ok_count;
3735
3736retry:
3737	if (take_rtnl)
3738		rtnl_lock();
3739	down_read(&block->cb_lock);
3740	/* Need to obtain the rtnl lock if the block is bound to devs that
3741	 * require it. In block bind code cb_lock is obtained while holding
3742	 * rtnl, so we must obtain the locks in the same order here.
3743	 */
3744	if (!rtnl_held && !take_rtnl && block->lockeddevcnt) {
3745		up_read(&block->cb_lock);
3746		take_rtnl = true;
3747		goto retry;
3748	}
3749
3750	ok_count = __tc_setup_cb_call(block, type, type_data, err_stop);
3751
3752	tc_cls_offload_cnt_reset(block, tp, in_hw_count, flags);
3753	if (tp->ops->hw_del)
3754		tp->ops->hw_del(tp, type_data);
3755
3756	up_read(&block->cb_lock);
3757	if (take_rtnl)
3758		rtnl_unlock();
3759	return min(ok_count, 0);
3760}
3761EXPORT_SYMBOL(tc_setup_cb_destroy);
3762
3763int tc_setup_cb_reoffload(struct tcf_block *block, struct tcf_proto *tp,
3764			  bool add, flow_setup_cb_t *cb,
3765			  enum tc_setup_type type, void *type_data,
3766			  void *cb_priv, u32 *flags, unsigned int *in_hw_count)
3767{
3768	int err = cb(type, type_data, cb_priv);
3769
3770	if (err) {
3771		if (add && tc_skip_sw(*flags))
3772			return err;
3773	} else {
3774		tc_cls_offload_cnt_update(block, tp, in_hw_count, flags, 1,
3775					  add);
3776	}
3777
3778	return 0;
3779}
3780EXPORT_SYMBOL(tc_setup_cb_reoffload);
3781
3782static int tcf_act_get_user_cookie(struct flow_action_entry *entry,
3783				   const struct tc_action *act)
3784{
3785	struct tc_cookie *user_cookie;
3786	int err = 0;
3787
3788	rcu_read_lock();
3789	user_cookie = rcu_dereference(act->user_cookie);
3790	if (user_cookie) {
3791		entry->user_cookie = flow_action_cookie_create(user_cookie->data,
3792							       user_cookie->len,
3793							       GFP_ATOMIC);
3794		if (!entry->user_cookie)
3795			err = -ENOMEM;
3796	}
3797	rcu_read_unlock();
3798	return err;
3799}
3800
3801static void tcf_act_put_user_cookie(struct flow_action_entry *entry)
3802{
3803	flow_action_cookie_destroy(entry->user_cookie);
3804}
3805
3806void tc_cleanup_offload_action(struct flow_action *flow_action)
3807{
3808	struct flow_action_entry *entry;
3809	int i;
3810
3811	flow_action_for_each(i, entry, flow_action) {
3812		tcf_act_put_user_cookie(entry);
3813		if (entry->destructor)
3814			entry->destructor(entry->destructor_priv);
3815	}
3816}
3817EXPORT_SYMBOL(tc_cleanup_offload_action);
3818
3819static int tc_setup_offload_act(struct tc_action *act,
3820				struct flow_action_entry *entry,
3821				u32 *index_inc,
3822				struct netlink_ext_ack *extack)
3823{
3824#ifdef CONFIG_NET_CLS_ACT
3825	if (act->ops->offload_act_setup) {
3826		return act->ops->offload_act_setup(act, entry, index_inc, true,
3827						   extack);
3828	} else {
3829		NL_SET_ERR_MSG(extack, "Action does not support offload");
3830		return -EOPNOTSUPP;
3831	}
3832#else
3833	return 0;
3834#endif
3835}
3836
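/* Translate an array of tc actions into flow_action entries for offload.
 * A single action may expand into several consecutive entries (e.g. one
 * per pedit key); @j tracks the next free entry.
 */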
3837int tc_setup_action(struct flow_action *flow_action,
3838		    struct tc_action *actions[],
3839		    u32 miss_cookie_base,
3840		    struct netlink_ext_ack *extack)
3841{
3842	int i, j, k, index, err = 0;
3843	struct tc_action *act;
3844
3845	BUILD_BUG_ON(TCA_ACT_HW_STATS_ANY != FLOW_ACTION_HW_STATS_ANY);
3846	BUILD_BUG_ON(TCA_ACT_HW_STATS_IMMEDIATE != FLOW_ACTION_HW_STATS_IMMEDIATE);
3847	BUILD_BUG_ON(TCA_ACT_HW_STATS_DELAYED != FLOW_ACTION_HW_STATS_DELAYED);
3848
3849	if (!actions)
3850		return 0;
3851
3852	j = 0;
3853	tcf_act_for_each_action(i, act, actions) {
3854		struct flow_action_entry *entry;
3855
3856		entry = &flow_action->entries[j];
3857		spin_lock_bh(&act->tcfa_lock);
3858		err = tcf_act_get_user_cookie(entry, act);
3859		if (err)
3860			goto err_out_locked;
3861
3862		index = 0;
3863		err = tc_setup_offload_act(act, entry, &index, extack);
3864		if (err)
3865			goto err_out_locked;
3866
3867		for (k = 0; k < index; k++) {
3868			entry[k].hw_stats = tc_act_hw_stats(act->hw_stats);
3869			entry[k].hw_index = act->tcfa_index;
3870			entry[k].cookie = (unsigned long)act;
3871			entry[k].miss_cookie =
3872				tcf_exts_miss_cookie_get(miss_cookie_base, i);
3873		}
3874
3875		j += index;
3876
3877		spin_unlock_bh(&act->tcfa_lock);
3878	}
3879
3880err_out:
3881	if (err)
3882		tc_cleanup_offload_action(flow_action);
3883
3884	return err;
3885err_out_locked:
3886	spin_unlock_bh(&act->tcfa_lock);
3887	goto err_out;
3888}
3889
3890int tc_setup_offload_action(struct flow_action *flow_action,
3891			    const struct tcf_exts *exts,
3892			    struct netlink_ext_ack *extack)
3893{
3894#ifdef CONFIG_NET_CLS_ACT
3895	u32 miss_cookie_base;
3896
3897	if (!exts)
3898		return 0;
3899
3900	miss_cookie_base = exts->miss_cookie_node ?
3901			   exts->miss_cookie_node->miss_cookie_base : 0;
3902	return tc_setup_action(flow_action, exts->actions, miss_cookie_base,
3903			       extack);
3904#else
3905	return 0;
3906#endif
3907}
3908EXPORT_SYMBOL(tc_setup_offload_action);
3909
3910unsigned int tcf_exts_num_actions(struct tcf_exts *exts)
3911{
3912	unsigned int num_acts = 0;
3913	struct tc_action *act;
3914	int i;
3915
3916	tcf_exts_for_each_action(i, act, exts) {
3917		if (is_tcf_pedit(act))
3918			num_acts += tcf_pedit_nkeys(act);
3919		else
3920			num_acts++;
3921	}
3922	return num_acts;
3923}
3924EXPORT_SYMBOL(tcf_exts_num_actions);
3925
3926#ifdef CONFIG_NET_CLS_ACT
3927static int tcf_qevent_parse_block_index(struct nlattr *block_index_attr,
3928					u32 *p_block_index,
3929					struct netlink_ext_ack *extack)
3930{
3931	*p_block_index = nla_get_u32(block_index_attr);
3932	if (!*p_block_index) {
3933		NL_SET_ERR_MSG(extack, "Block number may not be zero");
3934		return -EINVAL;
3935	}
3936
3937	return 0;
3938}
3939
3940int tcf_qevent_init(struct tcf_qevent *qe, struct Qdisc *sch,
3941		    enum flow_block_binder_type binder_type,
3942		    struct nlattr *block_index_attr,
3943		    struct netlink_ext_ack *extack)
3944{
3945	u32 block_index;
3946	int err;
3947
3948	if (!block_index_attr)
3949		return 0;
3950
3951	err = tcf_qevent_parse_block_index(block_index_attr, &block_index, extack);
3952	if (err)
3953		return err;
3954
3955	qe->info.binder_type = binder_type;
3956	qe->info.chain_head_change = tcf_chain_head_change_dflt;
3957	qe->info.chain_head_change_priv = &qe->filter_chain;
3958	qe->info.block_index = block_index;
3959
3960	return tcf_block_get_ext(&qe->block, sch, &qe->info, extack);
3961}
3962EXPORT_SYMBOL(tcf_qevent_init);
3963
3964void tcf_qevent_destroy(struct tcf_qevent *qe, struct Qdisc *sch)
3965{
3966	if (qe->info.block_index)
3967		tcf_block_put_ext(qe->block, sch, &qe->info);
3968}
3969EXPORT_SYMBOL(tcf_qevent_destroy);
3970
3971int tcf_qevent_validate_change(struct tcf_qevent *qe, struct nlattr *block_index_attr,
3972			       struct netlink_ext_ack *extack)
3973{
3974	u32 block_index;
3975	int err;
3976
3977	if (!block_index_attr)
3978		return 0;
3979
3980	err = tcf_qevent_parse_block_index(block_index_attr, &block_index, extack);
3981	if (err)
3982		return err;
3983
3984	/* Bounce newly-configured block or change in block. */
3985	if (block_index != qe->info.block_index) {
3986		NL_SET_ERR_MSG(extack, "Change of blocks is not supported");
3987		return -EINVAL;
3988	}
3989
3990	return 0;
3991}
3992EXPORT_SYMBOL(tcf_qevent_validate_change);
3993
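/* Run the qevent block's filters on @skb and act on the verdict. Returns
 * @skb if the packet should continue through the qdisc, or NULL with
 * *ret set if the packet was dropped, stolen or redirected.
 */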
3994struct sk_buff *tcf_qevent_handle(struct tcf_qevent *qe, struct Qdisc *sch, struct sk_buff *skb,
3995				  struct sk_buff **to_free, int *ret)
3996{
3997	struct tcf_result cl_res;
3998	struct tcf_proto *fl;
3999
4000	if (!qe->info.block_index)
4001		return skb;
4002
4003	fl = rcu_dereference_bh(qe->filter_chain);
4004
4005	switch (tcf_classify(skb, NULL, fl, &cl_res, false)) {
4006	case TC_ACT_SHOT:
4007		qdisc_qstats_drop(sch);
4008		__qdisc_drop(skb, to_free);
4009		*ret = __NET_XMIT_BYPASS;
4010		return NULL;
4011	case TC_ACT_STOLEN:
4012	case TC_ACT_QUEUED:
4013	case TC_ACT_TRAP:
4014		__qdisc_drop(skb, to_free);
4015		*ret = __NET_XMIT_STOLEN;
4016		return NULL;
4017	case TC_ACT_REDIRECT:
4018		skb_do_redirect(skb);
4019		*ret = __NET_XMIT_STOLEN;
4020		return NULL;
4021	}
4022
4023	return skb;
4024}
4025EXPORT_SYMBOL(tcf_qevent_handle);
4026
4027int tcf_qevent_dump(struct sk_buff *skb, int attr_name, struct tcf_qevent *qe)
4028{
4029	if (!qe->info.block_index)
4030		return 0;
4031	return nla_put_u32(skb, attr_name, qe->info.block_index);
4032}
4033EXPORT_SYMBOL(tcf_qevent_dump);
4034#endif
4035
4036static __net_init int tcf_net_init(struct net *net)
4037{
4038	struct tcf_net *tn = net_generic(net, tcf_net_id);
4039
4040	spin_lock_init(&tn->idr_lock);
4041	idr_init(&tn->idr);
4042	return 0;
4043}
4044
4045static void __net_exit tcf_net_exit(struct net *net)
4046{
4047	struct tcf_net *tn = net_generic(net, tcf_net_id);
4048
4049	idr_destroy(&tn->idr);
4050}
4051
4052static struct pernet_operations tcf_net_ops = {
4053	.init = tcf_net_init,
4054	.exit = tcf_net_exit,
4055	.id   = &tcf_net_id,
4056	.size = sizeof(struct tcf_net),
4057};
4058
4059static const struct rtnl_msg_handler tc_filter_rtnl_msg_handlers[] __initconst = {
4060	{.msgtype = RTM_NEWTFILTER, .doit = tc_new_tfilter,
4061	 .flags = RTNL_FLAG_DOIT_UNLOCKED},
4062	{.msgtype = RTM_DELTFILTER, .doit = tc_del_tfilter,
4063	 .flags = RTNL_FLAG_DOIT_UNLOCKED},
4064	{.msgtype = RTM_GETTFILTER, .doit = tc_get_tfilter,
4065	 .dumpit = tc_dump_tfilter, .flags = RTNL_FLAG_DOIT_UNLOCKED},
4066	{.msgtype = RTM_NEWCHAIN, .doit = tc_ctl_chain},
4067	{.msgtype = RTM_DELCHAIN, .doit = tc_ctl_chain},
4068	{.msgtype = RTM_GETCHAIN, .doit = tc_ctl_chain,
4069	 .dumpit = tc_dump_chain},
4070};
4071
4072static int __init tc_filter_init(void)
4073{
4074	int err;
4075
4076	tc_filter_wq = alloc_ordered_workqueue("tc_filter_workqueue", 0);
4077	if (!tc_filter_wq)
4078		return -ENOMEM;
4079
4080	err = register_pernet_subsys(&tcf_net_ops);
4081	if (err)
4082		goto err_register_pernet_subsys;
4083
4084	xa_init_flags(&tcf_exts_miss_cookies_xa, XA_FLAGS_ALLOC1);
4085	rtnl_register_many(tc_filter_rtnl_msg_handlers);
4086
4087	return 0;
4088
4089err_register_pernet_subsys:
4090	destroy_workqueue(tc_filter_wq);
4091	return err;
4092}
4093
4094subsys_initcall(tc_filter_init);