// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/sched/cls_u32.c	Ugly (or Universal) 32bit key Packet Classifier.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 *	The filters are packed to hash tables of key nodes
 *	with a set of 32bit key/mask pairs at every node.
 *	Nodes reference next level hash tables etc.
 *
 *	This scheme is the best universal classifier I managed to
 *	invent; it is not super-fast, but it is not slow (provided you
 *	program it correctly), and general enough.  And its relative
 *	speed grows as the number of rules becomes larger.
 *
 *	It seems that it represents the best middle point between
 *	speed and manageability both by human and by machine.
 *
 *	It is especially useful for link sharing combined with QoS;
 *	pure RSVP doesn't need such a general approach and can use
 *	much simpler (and faster) schemes, sort of cls_rsvp.c.
 *
 *	nfmark match added by Catalin(ux aka Dino) BOIE <catab at umbrella.ro>
 */
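
/* Example usage (illustrative only; device, handles and classid below are
 * placeholders):
 *
 *	tc filter add dev eth0 parent 1:0 prio 1 u32 \
 *		match ip dst 192.168.0.0/16 flowid 1:1
 *
 * creates a key node in the root hash table whose single 32bit key/mask
 * pair is compared against the IPv4 destination word of each packet.
 */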

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/percpu.h>
#include <linux/rtnetlink.h>
#include <linux/skbuff.h>
#include <linux/bitmap.h>
#include <linux/netdevice.h>
#include <linux/hash.h>
#include <net/netlink.h>
#include <net/act_api.h>
#include <net/pkt_cls.h>
#include <linux/idr.h>

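/* A 32bit u32 handle packs three fields (see the TC_U32_* helpers in
 * net/pkt_cls.h): bits 20..31 are the hash table id (0x800 for the root
 * table), bits 12..19 select the bucket, and bits 0..11 are the key node
 * id.  A handle whose low 20 bits are zero names the hash table itself.
 */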
struct tc_u_knode {
	struct tc_u_knode __rcu	*next;
	u32			handle;
	struct tc_u_hnode __rcu	*ht_up;
	struct tcf_exts		exts;
	int			ifindex;
	u8			fshift;
	struct tcf_result	res;
	struct tc_u_hnode __rcu	*ht_down;
#ifdef CONFIG_CLS_U32_PERF
	struct tc_u32_pcnt __percpu *pf;
#endif
	u32			flags;
	unsigned int		in_hw_count;
#ifdef CONFIG_CLS_U32_MARK
	u32			val;
	u32			mask;
	u32 __percpu		*pcpu_success;
#endif
	struct rcu_work		rwork;
	/* The 'sel' field MUST be the last field in structure to allow for
	 * tc_u32_keys allocated at end of structure.
	 */
	struct tc_u32_sel	sel;
};

struct tc_u_hnode {
	struct tc_u_hnode __rcu	*next;
	u32			handle;
	u32			prio;
	int			refcnt;
	unsigned int		divisor;
	struct idr		handle_idr;
	bool			is_root;
	struct rcu_head		rcu;
	u32			flags;
	/* The 'ht' field MUST be the last field in structure to allow for
	 * more entries allocated at end of structure.
	 */
	struct tc_u_knode __rcu	*ht[1];
};

struct tc_u_common {
	struct tc_u_hnode __rcu	*hlist;
	void			*ptr;
	int			refcnt;
	struct idr		handle_idr;
	struct hlist_node	hnode;
	long			knodes;
};

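/* Fold the masked key down to a bucket index.  fshift is precomputed in
 * u32_change() as ffs(ntohl(hmask)) - 1 so that the least significant set
 * bit of the mask lands at bit 0: e.g. a host-order hmask of 0x0000ff00
 * gives fshift 8, folding a masked key of 0x00003400 to bucket 0x34.
 */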
static inline unsigned int u32_hash_fold(__be32 key,
					 const struct tc_u32_sel *sel,
					 u8 fshift)
{
	unsigned int h = ntohl(key & sel->hmask) >> fshift;

	return h;
}

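/* The classifier walk: start at the root hash table, compare each knode's
 * key list against packet data at the current offset, and descend into a
 * linked table (ht_down) on a full match.  Parent tables are kept on an
 * explicit stack bounded by TC_U32_MAXDEPTH; when a subtree is exhausted
 * the walk pops back and re-checks the parent knode for TC_U32_TERMINAL.
 */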
static int u32_classify(struct sk_buff *skb, const struct tcf_proto *tp,
			struct tcf_result *res)
{
	struct {
		struct tc_u_knode *knode;
		unsigned int	  off;
	} stack[TC_U32_MAXDEPTH];

	struct tc_u_hnode *ht = rcu_dereference_bh(tp->root);
	unsigned int off = skb_network_offset(skb);
	struct tc_u_knode *n;
	int sdepth = 0;
	int off2 = 0;
	int sel = 0;
#ifdef CONFIG_CLS_U32_PERF
	int j;
#endif
	int i, r;

next_ht:
	n = rcu_dereference_bh(ht->ht[sel]);

next_knode:
	if (n) {
		struct tc_u32_key *key = n->sel.keys;

#ifdef CONFIG_CLS_U32_PERF
		__this_cpu_inc(n->pf->rcnt);
		j = 0;
#endif

		if (tc_skip_sw(n->flags)) {
			n = rcu_dereference_bh(n->next);
			goto next_knode;
		}

#ifdef CONFIG_CLS_U32_MARK
		if ((skb->mark & n->mask) != n->val) {
			n = rcu_dereference_bh(n->next);
			goto next_knode;
		} else {
			__this_cpu_inc(*n->pcpu_success);
		}
#endif

		for (i = n->sel.nkeys; i > 0; i--, key++) {
			int toff = off + key->off + (off2 & key->offmask);
			__be32 *data, hdata;

			if (skb_headroom(skb) + toff > INT_MAX)
				goto out;

			data = skb_header_pointer(skb, toff, 4, &hdata);
			if (!data)
				goto out;
			if ((*data ^ key->val) & key->mask) {
				n = rcu_dereference_bh(n->next);
				goto next_knode;
			}
#ifdef CONFIG_CLS_U32_PERF
			__this_cpu_inc(n->pf->kcnts[j]);
			j++;
#endif
		}

		ht = rcu_dereference_bh(n->ht_down);
		if (!ht) {
check_terminal:
			if (n->sel.flags & TC_U32_TERMINAL) {
				*res = n->res;
				if (!tcf_match_indev(skb, n->ifindex)) {
					n = rcu_dereference_bh(n->next);
					goto next_knode;
				}
#ifdef CONFIG_CLS_U32_PERF
				__this_cpu_inc(n->pf->rhit);
#endif
				r = tcf_exts_exec(skb, &n->exts, res);
				if (r < 0) {
					n = rcu_dereference_bh(n->next);
					goto next_knode;
				}

				return r;
			}
			n = rcu_dereference_bh(n->next);
			goto next_knode;
		}

		/* PUSH */
		if (sdepth >= TC_U32_MAXDEPTH)
			goto deadloop;
		stack[sdepth].knode = n;
		stack[sdepth].off = off;
		sdepth++;

		ht = rcu_dereference_bh(n->ht_down);
		sel = 0;
		if (ht->divisor) {
			__be32 *data, hdata;

			data = skb_header_pointer(skb, off + n->sel.hoff, 4,
						  &hdata);
			if (!data)
				goto out;
			sel = ht->divisor & u32_hash_fold(*data, &n->sel,
							  n->fshift);
		}
		if (!(n->sel.flags & (TC_U32_VAROFFSET | TC_U32_OFFSET | TC_U32_EAT)))
			goto next_ht;

		if (n->sel.flags & (TC_U32_OFFSET | TC_U32_VAROFFSET)) {
			off2 = n->sel.off + 3;
			if (n->sel.flags & TC_U32_VAROFFSET) {
				__be16 *data, hdata;

				data = skb_header_pointer(skb,
							  off + n->sel.offoff,
							  2, &hdata);
				if (!data)
					goto out;
				off2 += ntohs(n->sel.offmask & *data) >>
					n->sel.offshift;
			}
			off2 &= ~3;
		}
		if (n->sel.flags & TC_U32_EAT) {
			off += off2;
			off2 = 0;
		}

		if (off < skb->len)
			goto next_ht;
	}

	/* POP */
	if (sdepth--) {
		n = stack[sdepth].knode;
		ht = rcu_dereference_bh(n->ht_up);
		off = stack[sdepth].off;
		goto check_terminal;
	}
out:
	return -1;

deadloop:
	net_warn_ratelimited("cls_u32: dead loop\n");
	return -1;
}

static struct tc_u_hnode *u32_lookup_ht(struct tc_u_common *tp_c, u32 handle)
{
	struct tc_u_hnode *ht;

	for (ht = rtnl_dereference(tp_c->hlist);
	     ht;
	     ht = rtnl_dereference(ht->next))
		if (ht->handle == handle)
			break;

	return ht;
}

static struct tc_u_knode *u32_lookup_key(struct tc_u_hnode *ht, u32 handle)
{
	unsigned int sel;
	struct tc_u_knode *n = NULL;

	sel = TC_U32_HASH(handle);
	if (sel > ht->divisor)
		goto out;

	for (n = rtnl_dereference(ht->ht[sel]);
	     n;
	     n = rtnl_dereference(n->next))
		if (n->handle == handle)
			break;
out:
	return n;
}


static void *u32_get(struct tcf_proto *tp, u32 handle)
{
	struct tc_u_hnode *ht;
	struct tc_u_common *tp_c = tp->data;

	if (TC_U32_HTID(handle) == TC_U32_ROOT)
		ht = rtnl_dereference(tp->root);
	else
		ht = u32_lookup_ht(tp_c, TC_U32_HTID(handle));

	if (!ht)
		return NULL;

	if (TC_U32_KEY(handle) == 0)
		return ht;

	return u32_lookup_key(ht, handle);
}

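/* gen_new_htid() allocates a small table id (below 0x800), sets bit 11 and
 * shifts it into the top 12 bits of the handle; generated htids therefore
 * land in 0x80100000..0xFFE00000 and never collide with the root table's
 * fixed handle 0x80000000.
 */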
/* Protected by rtnl lock */
static u32 gen_new_htid(struct tc_u_common *tp_c, struct tc_u_hnode *ptr)
{
	int id = idr_alloc_cyclic(&tp_c->handle_idr, ptr, 1, 0x7FF, GFP_KERNEL);
	if (id < 0)
		return 0;
	return (id | 0x800U) << 20;
}

static struct hlist_head *tc_u_common_hash;

#define U32_HASH_SHIFT 10
#define U32_HASH_SIZE (1 << U32_HASH_SHIFT)

static void *tc_u_common_ptr(const struct tcf_proto *tp)
{
	struct tcf_block *block = tp->chain->block;

	/* Block sharing is currently supported only for classless
	 * qdiscs; in that case we use the block for tc_u_common
	 * identification. When the block is not shared, block->q is a
	 * valid pointer and we can use that instead. That works for
	 * classful qdiscs.
	 */
	if (tcf_block_shared(block))
		return block;
	else
		return block->q;
}

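/* All u32 filter chains that hang off the same qdisc (or shared block)
 * must see one tc_u_common so that link handles resolve across chains;
 * the key from tc_u_common_ptr() is hashed into a global table to find it.
 */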
static struct hlist_head *tc_u_hash(void *key)
{
	return tc_u_common_hash + hash_ptr(key, U32_HASH_SHIFT);
}

static struct tc_u_common *tc_u_common_find(void *key)
{
	struct tc_u_common *tc;
	hlist_for_each_entry(tc, tc_u_hash(key), hnode) {
		if (tc->ptr == key)
			return tc;
	}
	return NULL;
}

static int u32_init(struct tcf_proto *tp)
{
	struct tc_u_hnode *root_ht;
	void *key = tc_u_common_ptr(tp);
	struct tc_u_common *tp_c = tc_u_common_find(key);

	root_ht = kzalloc(sizeof(*root_ht), GFP_KERNEL);
	if (root_ht == NULL)
		return -ENOBUFS;

	root_ht->refcnt++;
	root_ht->handle = tp_c ? gen_new_htid(tp_c, root_ht) : 0x80000000;
	root_ht->prio = tp->prio;
	root_ht->is_root = true;
	idr_init(&root_ht->handle_idr);

	if (tp_c == NULL) {
		tp_c = kzalloc(sizeof(*tp_c), GFP_KERNEL);
		if (tp_c == NULL) {
			kfree(root_ht);
			return -ENOBUFS;
		}
		tp_c->ptr = key;
		INIT_HLIST_NODE(&tp_c->hnode);
		idr_init(&tp_c->handle_idr);

		hlist_add_head(&tp_c->hnode, tc_u_hash(key));
	}

	tp_c->refcnt++;
	RCU_INIT_POINTER(root_ht->next, tp_c->hlist);
	rcu_assign_pointer(tp_c->hlist, root_ht);

	root_ht->refcnt++;
	rcu_assign_pointer(tp->root, root_ht);
	tp->data = tp_c;
	return 0;
}

static int u32_destroy_key(struct tc_u_knode *n, bool free_pf)
{
	struct tc_u_hnode *ht = rtnl_dereference(n->ht_down);

	tcf_exts_destroy(&n->exts);
	tcf_exts_put_net(&n->exts);
	if (ht && --ht->refcnt == 0)
		kfree(ht);
#ifdef CONFIG_CLS_U32_PERF
	if (free_pf)
		free_percpu(n->pf);
#endif
#ifdef CONFIG_CLS_U32_MARK
	if (free_pf)
		free_percpu(n->pcpu_success);
#endif
	kfree(n);
	return 0;
}

/* u32_delete_key_work() should be used when freeing a copied version of a
 * tc_u_knode obtained from u32_init_knode(). When copies are obtained from
 * u32_init_knode() the statistics are shared between the old and new copies
 * to allow readers to continue to update the statistics during the copy.
 * To support this, this variant does not free the percpu statistics.
 */
static void u32_delete_key_work(struct work_struct *work)
{
	struct tc_u_knode *key = container_of(to_rcu_work(work),
					      struct tc_u_knode,
					      rwork);
	rtnl_lock();
	u32_destroy_key(key, false);
	rtnl_unlock();
}

/* u32_delete_key_freepf_work() is the variant that frees the entire
 * structure including the statistics percpu variables. Only use this if
 * the key is not a copy returned by u32_init_knode(). See
 * u32_delete_key_work() for the variant that should be used with keys
 * returned from u32_init_knode().
 */
static void u32_delete_key_freepf_work(struct work_struct *work)
{
	struct tc_u_knode *key = container_of(to_rcu_work(work),
					      struct tc_u_knode,
					      rwork);
	rtnl_lock();
	u32_destroy_key(key, true);
	rtnl_unlock();
}

static int u32_delete_key(struct tcf_proto *tp, struct tc_u_knode *key)
{
	struct tc_u_common *tp_c = tp->data;
	struct tc_u_knode __rcu **kp;
	struct tc_u_knode *pkp;
	struct tc_u_hnode *ht = rtnl_dereference(key->ht_up);

	if (ht) {
		kp = &ht->ht[TC_U32_HASH(key->handle)];
		for (pkp = rtnl_dereference(*kp); pkp;
		     kp = &pkp->next, pkp = rtnl_dereference(*kp)) {
			if (pkp == key) {
				RCU_INIT_POINTER(*kp, key->next);
				tp_c->knodes--;

				tcf_unbind_filter(tp, &key->res);
				idr_remove(&ht->handle_idr, key->handle);
				tcf_exts_get_net(&key->exts);
				tcf_queue_work(&key->rwork, u32_delete_key_freepf_work);
				return 0;
			}
		}
	}
	WARN_ON(1);
	return 0;
}

static void u32_clear_hw_hnode(struct tcf_proto *tp, struct tc_u_hnode *h,
			       struct netlink_ext_ack *extack)
{
	struct tcf_block *block = tp->chain->block;
	struct tc_cls_u32_offload cls_u32 = {};

	tc_cls_common_offload_init(&cls_u32.common, tp, h->flags, extack);
	cls_u32.command = TC_CLSU32_DELETE_HNODE;
	cls_u32.hnode.divisor = h->divisor;
	cls_u32.hnode.handle = h->handle;
	cls_u32.hnode.prio = h->prio;

	tc_setup_cb_call(block, TC_SETUP_CLSU32, &cls_u32, false, true);
}

static int u32_replace_hw_hnode(struct tcf_proto *tp, struct tc_u_hnode *h,
				u32 flags, struct netlink_ext_ack *extack)
{
	struct tcf_block *block = tp->chain->block;
	struct tc_cls_u32_offload cls_u32 = {};
	bool skip_sw = tc_skip_sw(flags);
	bool offloaded = false;
	int err;

	tc_cls_common_offload_init(&cls_u32.common, tp, flags, extack);
	cls_u32.command = TC_CLSU32_NEW_HNODE;
	cls_u32.hnode.divisor = h->divisor;
	cls_u32.hnode.handle = h->handle;
	cls_u32.hnode.prio = h->prio;

	err = tc_setup_cb_call(block, TC_SETUP_CLSU32, &cls_u32, skip_sw, true);
	if (err < 0) {
		u32_clear_hw_hnode(tp, h, NULL);
		return err;
	} else if (err > 0) {
		offloaded = true;
	}

	if (skip_sw && !offloaded)
		return -EINVAL;

	return 0;
}

static void u32_remove_hw_knode(struct tcf_proto *tp, struct tc_u_knode *n,
				struct netlink_ext_ack *extack)
{
	struct tcf_block *block = tp->chain->block;
	struct tc_cls_u32_offload cls_u32 = {};

	tc_cls_common_offload_init(&cls_u32.common, tp, n->flags, extack);
	cls_u32.command = TC_CLSU32_DELETE_KNODE;
	cls_u32.knode.handle = n->handle;

	tc_setup_cb_destroy(block, tp, TC_SETUP_CLSU32, &cls_u32, false,
			    &n->flags, &n->in_hw_count, true);
}

static int u32_replace_hw_knode(struct tcf_proto *tp, struct tc_u_knode *n,
				u32 flags, struct netlink_ext_ack *extack)
{
	struct tc_u_hnode *ht = rtnl_dereference(n->ht_down);
	struct tcf_block *block = tp->chain->block;
	struct tc_cls_u32_offload cls_u32 = {};
	bool skip_sw = tc_skip_sw(flags);
	int err;

	tc_cls_common_offload_init(&cls_u32.common, tp, flags, extack);
	cls_u32.command = TC_CLSU32_REPLACE_KNODE;
	cls_u32.knode.handle = n->handle;
	cls_u32.knode.fshift = n->fshift;
#ifdef CONFIG_CLS_U32_MARK
	cls_u32.knode.val = n->val;
	cls_u32.knode.mask = n->mask;
#else
	cls_u32.knode.val = 0;
	cls_u32.knode.mask = 0;
#endif
	cls_u32.knode.sel = &n->sel;
	cls_u32.knode.res = &n->res;
	cls_u32.knode.exts = &n->exts;
	if (n->ht_down)
		cls_u32.knode.link_handle = ht->handle;

	err = tc_setup_cb_add(block, tp, TC_SETUP_CLSU32, &cls_u32, skip_sw,
			      &n->flags, &n->in_hw_count, true);
	if (err) {
		u32_remove_hw_knode(tp, n, NULL);
		return err;
	}

	if (skip_sw && !(n->flags & TCA_CLS_FLAGS_IN_HW))
		return -EINVAL;

	return 0;
}

static void u32_clear_hnode(struct tcf_proto *tp, struct tc_u_hnode *ht,
			    struct netlink_ext_ack *extack)
{
	struct tc_u_common *tp_c = tp->data;
	struct tc_u_knode *n;
	unsigned int h;

	for (h = 0; h <= ht->divisor; h++) {
		while ((n = rtnl_dereference(ht->ht[h])) != NULL) {
			RCU_INIT_POINTER(ht->ht[h],
					 rtnl_dereference(n->next));
			tp_c->knodes--;
			tcf_unbind_filter(tp, &n->res);
			u32_remove_hw_knode(tp, n, extack);
			idr_remove(&ht->handle_idr, n->handle);
			if (tcf_exts_get_net(&n->exts))
				tcf_queue_work(&n->rwork, u32_delete_key_freepf_work);
			else
				u32_destroy_key(n, true);
		}
	}
}

static int u32_destroy_hnode(struct tcf_proto *tp, struct tc_u_hnode *ht,
			     struct netlink_ext_ack *extack)
{
	struct tc_u_common *tp_c = tp->data;
	struct tc_u_hnode __rcu **hn;
	struct tc_u_hnode *phn;

	WARN_ON(--ht->refcnt);

	u32_clear_hnode(tp, ht, extack);

	hn = &tp_c->hlist;
	for (phn = rtnl_dereference(*hn);
	     phn;
	     hn = &phn->next, phn = rtnl_dereference(*hn)) {
		if (phn == ht) {
			u32_clear_hw_hnode(tp, ht, extack);
			idr_destroy(&ht->handle_idr);
			idr_remove(&tp_c->handle_idr, ht->handle);
			RCU_INIT_POINTER(*hn, ht->next);
			kfree_rcu(ht, rcu);
			return 0;
		}
	}

	return -ENOENT;
}

static void u32_destroy(struct tcf_proto *tp, bool rtnl_held,
			struct netlink_ext_ack *extack)
{
	struct tc_u_common *tp_c = tp->data;
	struct tc_u_hnode *root_ht = rtnl_dereference(tp->root);

	WARN_ON(root_ht == NULL);

	if (root_ht && --root_ht->refcnt == 1)
		u32_destroy_hnode(tp, root_ht, extack);

	if (--tp_c->refcnt == 0) {
		struct tc_u_hnode *ht;

		hlist_del(&tp_c->hnode);

		while ((ht = rtnl_dereference(tp_c->hlist)) != NULL) {
			u32_clear_hnode(tp, ht, extack);
			RCU_INIT_POINTER(tp_c->hlist, ht->next);

			/* u32_destroy_key() will later free ht for us, if it's
			 * still referenced by some knode
			 */
			if (--ht->refcnt == 0)
				kfree_rcu(ht, rcu);
		}

		idr_destroy(&tp_c->handle_idr);
		kfree(tp_c);
	}

	tp->data = NULL;
}

static int u32_delete(struct tcf_proto *tp, void *arg, bool *last,
		      bool rtnl_held, struct netlink_ext_ack *extack)
{
	struct tc_u_hnode *ht = arg;
	struct tc_u_common *tp_c = tp->data;
	int ret = 0;

	if (TC_U32_KEY(ht->handle)) {
		u32_remove_hw_knode(tp, (struct tc_u_knode *)ht, extack);
		ret = u32_delete_key(tp, (struct tc_u_knode *)ht);
		goto out;
	}

	if (ht->is_root) {
		NL_SET_ERR_MSG_MOD(extack, "Not allowed to delete root node");
		return -EINVAL;
	}

	if (ht->refcnt == 1) {
		u32_destroy_hnode(tp, ht, extack);
	} else {
		NL_SET_ERR_MSG_MOD(extack, "Can not delete in-use filter");
		return -EBUSY;
	}

out:
	*last = tp_c->refcnt == 1 && tp_c->knodes == 0;
	return ret;
}

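/* Pick a free node id within hash table @ht, combined with table id @htid:
 * prefer ids in 0x800..0xFFF, then fall back to the low range starting at
 * htid + 1, and as a last resort return the maximum id even though it may
 * already be taken.
 */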
static u32 gen_new_kid(struct tc_u_hnode *ht, u32 htid)
{
	u32 index = htid | 0x800;
	u32 max = htid | 0xFFF;

	if (idr_alloc_u32(&ht->handle_idr, NULL, &index, max, GFP_KERNEL)) {
		index = htid + 1;
		if (idr_alloc_u32(&ht->handle_idr, NULL, &index, max,
				  GFP_KERNEL))
			index = max;
	}

	return index;
}

static const struct nla_policy u32_policy[TCA_U32_MAX + 1] = {
	[TCA_U32_CLASSID]	= { .type = NLA_U32 },
	[TCA_U32_HASH]		= { .type = NLA_U32 },
	[TCA_U32_LINK]		= { .type = NLA_U32 },
	[TCA_U32_DIVISOR]	= { .type = NLA_U32 },
	[TCA_U32_SEL]		= { .len = sizeof(struct tc_u32_sel) },
	[TCA_U32_INDEV]		= { .type = NLA_STRING, .len = IFNAMSIZ },
	[TCA_U32_MARK]		= { .len = sizeof(struct tc_u32_mark) },
	[TCA_U32_FLAGS]		= { .type = NLA_U32 },
};

static int u32_set_parms(struct net *net, struct tcf_proto *tp,
			 unsigned long base,
			 struct tc_u_knode *n, struct nlattr **tb,
			 struct nlattr *est, bool ovr,
			 struct netlink_ext_ack *extack)
{
	int err;

	err = tcf_exts_validate(net, tp, tb, est, &n->exts, ovr, true, extack);
	if (err < 0)
		return err;

	if (tb[TCA_U32_LINK]) {
		u32 handle = nla_get_u32(tb[TCA_U32_LINK]);
		struct tc_u_hnode *ht_down = NULL, *ht_old;

		if (TC_U32_KEY(handle)) {
			NL_SET_ERR_MSG_MOD(extack, "u32 Link handle must be a hash table");
			return -EINVAL;
		}

		if (handle) {
			ht_down = u32_lookup_ht(tp->data, handle);

			if (!ht_down) {
				NL_SET_ERR_MSG_MOD(extack, "Link hash table not found");
				return -EINVAL;
			}
			if (ht_down->is_root) {
				NL_SET_ERR_MSG_MOD(extack, "Not linking to root node");
				return -EINVAL;
			}
			ht_down->refcnt++;
		}

		ht_old = rtnl_dereference(n->ht_down);
		rcu_assign_pointer(n->ht_down, ht_down);

		if (ht_old)
			ht_old->refcnt--;
	}
	if (tb[TCA_U32_CLASSID]) {
		n->res.classid = nla_get_u32(tb[TCA_U32_CLASSID]);
		tcf_bind_filter(tp, &n->res, base);
	}

	if (tb[TCA_U32_INDEV]) {
		int ret;
		ret = tcf_change_indev(net, tb[TCA_U32_INDEV], extack);
		if (ret < 0)
			return -EINVAL;
		n->ifindex = ret;
	}
	return 0;
}

static void u32_replace_knode(struct tcf_proto *tp, struct tc_u_common *tp_c,
			      struct tc_u_knode *n)
{
	struct tc_u_knode __rcu **ins;
	struct tc_u_knode *pins;
	struct tc_u_hnode *ht;

	if (TC_U32_HTID(n->handle) == TC_U32_ROOT)
		ht = rtnl_dereference(tp->root);
	else
		ht = u32_lookup_ht(tp_c, TC_U32_HTID(n->handle));

	ins = &ht->ht[TC_U32_HASH(n->handle)];

	/* The node must always exist for it to be replaced; if this is not
	 * the case then something went very wrong elsewhere.
	 */
	for (pins = rtnl_dereference(*ins); ;
	     ins = &pins->next, pins = rtnl_dereference(*ins))
		if (pins->handle == n->handle)
			break;

	idr_replace(&ht->handle_idr, n, n->handle);
	RCU_INIT_POINTER(n->next, pins->next);
	rcu_assign_pointer(*ins, n);
}

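/* Allocate a replacement knode that shallow-copies @n.  The percpu counter
 * pointers (pf, pcpu_success) are deliberately shared with the old node so
 * that concurrent readers keep updating them across the replace; the old
 * copy must therefore be released with u32_destroy_key(..., false).
 */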
static struct tc_u_knode *u32_init_knode(struct net *net, struct tcf_proto *tp,
					 struct tc_u_knode *n)
{
	struct tc_u_hnode *ht = rtnl_dereference(n->ht_down);
	struct tc_u32_sel *s = &n->sel;
	struct tc_u_knode *new;

	new = kzalloc(sizeof(*n) + s->nkeys*sizeof(struct tc_u32_key),
		      GFP_KERNEL);

	if (!new)
		return NULL;

	RCU_INIT_POINTER(new->next, n->next);
	new->handle = n->handle;
	RCU_INIT_POINTER(new->ht_up, n->ht_up);

	new->ifindex = n->ifindex;
	new->fshift = n->fshift;
	new->res = n->res;
	new->flags = n->flags;
	RCU_INIT_POINTER(new->ht_down, ht);

	/* bump reference count as long as we hold pointer to structure */
	if (ht)
		ht->refcnt++;

#ifdef CONFIG_CLS_U32_PERF
	/* Statistics may be incremented by readers during update
	 * so we must keep them intact. When the node is later destroyed
	 * a special destroy call must be made to not free the pf memory.
	 */
	new->pf = n->pf;
#endif

#ifdef CONFIG_CLS_U32_MARK
	new->val = n->val;
	new->mask = n->mask;
	/* Similarly, success statistics must be moved as pointers */
	new->pcpu_success = n->pcpu_success;
#endif
	memcpy(&new->sel, s, struct_size(s, keys, s->nkeys));

	if (tcf_exts_init(&new->exts, net, TCA_U32_ACT, TCA_U32_POLICE)) {
		kfree(new);
		return NULL;
	}

	return new;
}

static int u32_change(struct net *net, struct sk_buff *in_skb,
		      struct tcf_proto *tp, unsigned long base, u32 handle,
		      struct nlattr **tca, void **arg, bool ovr, bool rtnl_held,
		      struct netlink_ext_ack *extack)
{
	struct tc_u_common *tp_c = tp->data;
	struct tc_u_hnode *ht;
	struct tc_u_knode *n;
	struct tc_u32_sel *s;
	struct nlattr *opt = tca[TCA_OPTIONS];
	struct nlattr *tb[TCA_U32_MAX + 1];
	u32 htid, flags = 0;
	size_t sel_size;
	int err;
#ifdef CONFIG_CLS_U32_PERF
	size_t size;
#endif

	if (!opt) {
		if (handle) {
			NL_SET_ERR_MSG_MOD(extack, "Filter handle requires options");
			return -EINVAL;
		} else {
			return 0;
		}
	}

	err = nla_parse_nested_deprecated(tb, TCA_U32_MAX, opt, u32_policy,
					  extack);
	if (err < 0)
		return err;

	if (tb[TCA_U32_FLAGS]) {
		flags = nla_get_u32(tb[TCA_U32_FLAGS]);
		if (!tc_flags_valid(flags)) {
			NL_SET_ERR_MSG_MOD(extack, "Invalid filter flags");
			return -EINVAL;
		}
	}

	n = *arg;
	if (n) {
		struct tc_u_knode *new;

		if (TC_U32_KEY(n->handle) == 0) {
			NL_SET_ERR_MSG_MOD(extack, "Key node id cannot be zero");
			return -EINVAL;
		}

		if ((n->flags ^ flags) &
		    ~(TCA_CLS_FLAGS_IN_HW | TCA_CLS_FLAGS_NOT_IN_HW)) {
			NL_SET_ERR_MSG_MOD(extack, "Key node flags do not match passed flags");
			return -EINVAL;
		}

		new = u32_init_knode(net, tp, n);
		if (!new)
			return -ENOMEM;

		err = u32_set_parms(net, tp, base, new, tb,
				    tca[TCA_RATE], ovr, extack);

		if (err) {
			u32_destroy_key(new, false);
			return err;
		}

		err = u32_replace_hw_knode(tp, new, flags, extack);
		if (err) {
			u32_destroy_key(new, false);
			return err;
		}

		if (!tc_in_hw(new->flags))
			new->flags |= TCA_CLS_FLAGS_NOT_IN_HW;

		u32_replace_knode(tp, tp_c, new);
		tcf_unbind_filter(tp, &n->res);
		tcf_exts_get_net(&n->exts);
		tcf_queue_work(&n->rwork, u32_delete_key_work);
		return 0;
	}

	if (tb[TCA_U32_DIVISOR]) {
		unsigned int divisor = nla_get_u32(tb[TCA_U32_DIVISOR]);

		if (!is_power_of_2(divisor)) {
			NL_SET_ERR_MSG_MOD(extack, "Divisor is not a power of 2");
			return -EINVAL;
		}
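		/* ht->divisor is stored as the bucket count minus one because
		 * u32_classify() uses it as a mask (sel = ht->divisor & ...),
		 * which is also why a power-of-2 count is required above.
		 * Hence the post-decrement in the limit check below.
		 */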
		if (divisor-- > 0x100) {
			NL_SET_ERR_MSG_MOD(extack, "Exceeded maximum 256 hash buckets");
			return -EINVAL;
		}
		if (TC_U32_KEY(handle)) {
			NL_SET_ERR_MSG_MOD(extack, "Divisor can only be used on a hash table");
			return -EINVAL;
		}
		ht = kzalloc(sizeof(*ht) + divisor*sizeof(void *), GFP_KERNEL);
		if (ht == NULL)
			return -ENOBUFS;
		if (handle == 0) {
			handle = gen_new_htid(tp->data, ht);
			if (handle == 0) {
				kfree(ht);
				return -ENOMEM;
			}
		} else {
			err = idr_alloc_u32(&tp_c->handle_idr, ht, &handle,
					    handle, GFP_KERNEL);
			if (err) {
				kfree(ht);
				return err;
			}
		}
		ht->refcnt = 1;
		ht->divisor = divisor;
		ht->handle = handle;
		ht->prio = tp->prio;
		idr_init(&ht->handle_idr);
		ht->flags = flags;

		err = u32_replace_hw_hnode(tp, ht, flags, extack);
		if (err) {
			idr_remove(&tp_c->handle_idr, handle);
			kfree(ht);
			return err;
		}

		RCU_INIT_POINTER(ht->next, tp_c->hlist);
		rcu_assign_pointer(tp_c->hlist, ht);
		*arg = ht;

		return 0;
	}

	if (tb[TCA_U32_HASH]) {
		htid = nla_get_u32(tb[TCA_U32_HASH]);
		if (TC_U32_HTID(htid) == TC_U32_ROOT) {
			ht = rtnl_dereference(tp->root);
			htid = ht->handle;
		} else {
			ht = u32_lookup_ht(tp->data, TC_U32_HTID(htid));
			if (!ht) {
				NL_SET_ERR_MSG_MOD(extack, "Specified hash table not found");
				return -EINVAL;
			}
		}
	} else {
		ht = rtnl_dereference(tp->root);
		htid = ht->handle;
	}

	if (ht->divisor < TC_U32_HASH(htid)) {
		NL_SET_ERR_MSG_MOD(extack, "Specified hash table buckets exceed configured value");
		return -EINVAL;
	}

	if (handle) {
		if (TC_U32_HTID(handle) && TC_U32_HTID(handle ^ htid)) {
			NL_SET_ERR_MSG_MOD(extack, "Handle specified hash table address mismatch");
			return -EINVAL;
		}
		handle = htid | TC_U32_NODE(handle);
		err = idr_alloc_u32(&ht->handle_idr, NULL, &handle, handle,
				    GFP_KERNEL);
		if (err)
			return err;
	} else
		handle = gen_new_kid(ht, htid);

	if (tb[TCA_U32_SEL] == NULL) {
		NL_SET_ERR_MSG_MOD(extack, "Selector not specified");
		err = -EINVAL;
		goto erridr;
	}

	s = nla_data(tb[TCA_U32_SEL]);
	sel_size = struct_size(s, keys, s->nkeys);
	if (nla_len(tb[TCA_U32_SEL]) < sel_size) {
		err = -EINVAL;
		goto erridr;
	}

	n = kzalloc(offsetof(typeof(*n), sel) + sel_size, GFP_KERNEL);
	if (n == NULL) {
		err = -ENOBUFS;
		goto erridr;
	}

#ifdef CONFIG_CLS_U32_PERF
	size = sizeof(struct tc_u32_pcnt) + s->nkeys * sizeof(u64);
	n->pf = __alloc_percpu(size, __alignof__(struct tc_u32_pcnt));
	if (!n->pf) {
		err = -ENOBUFS;
		goto errfree;
	}
#endif

	memcpy(&n->sel, s, sel_size);
	RCU_INIT_POINTER(n->ht_up, ht);
	n->handle = handle;
	n->fshift = s->hmask ? ffs(ntohl(s->hmask)) - 1 : 0;
	n->flags = flags;

	err = tcf_exts_init(&n->exts, net, TCA_U32_ACT, TCA_U32_POLICE);
	if (err < 0)
		goto errout;

#ifdef CONFIG_CLS_U32_MARK
	n->pcpu_success = alloc_percpu(u32);
	if (!n->pcpu_success) {
		err = -ENOMEM;
		goto errout;
	}

	if (tb[TCA_U32_MARK]) {
		struct tc_u32_mark *mark;

		mark = nla_data(tb[TCA_U32_MARK]);
		n->val = mark->val;
		n->mask = mark->mask;
	}
#endif

	err = u32_set_parms(net, tp, base, n, tb, tca[TCA_RATE], ovr,
			    extack);
	if (err == 0) {
		struct tc_u_knode __rcu **ins;
		struct tc_u_knode *pins;

		err = u32_replace_hw_knode(tp, n, flags, extack);
		if (err)
			goto errhw;

		if (!tc_in_hw(n->flags))
			n->flags |= TCA_CLS_FLAGS_NOT_IN_HW;

		ins = &ht->ht[TC_U32_HASH(handle)];
		for (pins = rtnl_dereference(*ins); pins;
		     ins = &pins->next, pins = rtnl_dereference(*ins))
			if (TC_U32_NODE(handle) < TC_U32_NODE(pins->handle))
				break;

		RCU_INIT_POINTER(n->next, pins);
		rcu_assign_pointer(*ins, n);
		tp_c->knodes++;
		*arg = n;
		return 0;
	}

errhw:
#ifdef CONFIG_CLS_U32_MARK
	free_percpu(n->pcpu_success);
#endif

errout:
	tcf_exts_destroy(&n->exts);
#ifdef CONFIG_CLS_U32_PERF
errfree:
	free_percpu(n->pf);
#endif
	kfree(n);
erridr:
	idr_remove(&ht->handle_idr, handle);
	return err;
}

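/* Walk every hash table created on this tp's prio and every knode in each
 * bucket, honouring arg->skip and arg->count so interrupted netlink dumps
 * can resume where they left off.
 */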
static void u32_walk(struct tcf_proto *tp, struct tcf_walker *arg,
		     bool rtnl_held)
{
	struct tc_u_common *tp_c = tp->data;
	struct tc_u_hnode *ht;
	struct tc_u_knode *n;
	unsigned int h;

	if (arg->stop)
		return;

	for (ht = rtnl_dereference(tp_c->hlist);
	     ht;
	     ht = rtnl_dereference(ht->next)) {
		if (ht->prio != tp->prio)
			continue;
		if (arg->count >= arg->skip) {
			if (arg->fn(tp, ht, arg) < 0) {
				arg->stop = 1;
				return;
			}
		}
		arg->count++;
		for (h = 0; h <= ht->divisor; h++) {
			for (n = rtnl_dereference(ht->ht[h]);
			     n;
			     n = rtnl_dereference(n->next)) {
				if (arg->count < arg->skip) {
					arg->count++;
					continue;
				}
				if (arg->fn(tp, n, arg) < 0) {
					arg->stop = 1;
					return;
				}
				arg->count++;
			}
		}
	}
}

static int u32_reoffload_hnode(struct tcf_proto *tp, struct tc_u_hnode *ht,
			       bool add, flow_setup_cb_t *cb, void *cb_priv,
			       struct netlink_ext_ack *extack)
{
	struct tc_cls_u32_offload cls_u32 = {};
	int err;

	tc_cls_common_offload_init(&cls_u32.common, tp, ht->flags, extack);
	cls_u32.command = add ? TC_CLSU32_NEW_HNODE : TC_CLSU32_DELETE_HNODE;
	cls_u32.hnode.divisor = ht->divisor;
	cls_u32.hnode.handle = ht->handle;
	cls_u32.hnode.prio = ht->prio;

	err = cb(TC_SETUP_CLSU32, &cls_u32, cb_priv);
	if (err && add && tc_skip_sw(ht->flags))
		return err;

	return 0;
}

static int u32_reoffload_knode(struct tcf_proto *tp, struct tc_u_knode *n,
			       bool add, flow_setup_cb_t *cb, void *cb_priv,
			       struct netlink_ext_ack *extack)
{
	struct tc_u_hnode *ht = rtnl_dereference(n->ht_down);
	struct tcf_block *block = tp->chain->block;
	struct tc_cls_u32_offload cls_u32 = {};
	int err;

	tc_cls_common_offload_init(&cls_u32.common, tp, n->flags, extack);
	cls_u32.command = add ?
		TC_CLSU32_REPLACE_KNODE : TC_CLSU32_DELETE_KNODE;
	cls_u32.knode.handle = n->handle;

	if (add) {
		cls_u32.knode.fshift = n->fshift;
#ifdef CONFIG_CLS_U32_MARK
		cls_u32.knode.val = n->val;
		cls_u32.knode.mask = n->mask;
#else
		cls_u32.knode.val = 0;
		cls_u32.knode.mask = 0;
#endif
		cls_u32.knode.sel = &n->sel;
		cls_u32.knode.res = &n->res;
		cls_u32.knode.exts = &n->exts;
		if (n->ht_down)
			cls_u32.knode.link_handle = ht->handle;
	}

	err = tc_setup_cb_reoffload(block, tp, add, cb, TC_SETUP_CLSU32,
				    &cls_u32, cb_priv, &n->flags,
				    &n->in_hw_count);
	if (err)
		return err;

	return 0;
}

static int u32_reoffload(struct tcf_proto *tp, bool add, flow_setup_cb_t *cb,
			 void *cb_priv, struct netlink_ext_ack *extack)
{
	struct tc_u_common *tp_c = tp->data;
	struct tc_u_hnode *ht;
	struct tc_u_knode *n;
	unsigned int h;
	int err;

	for (ht = rtnl_dereference(tp_c->hlist);
	     ht;
	     ht = rtnl_dereference(ht->next)) {
		if (ht->prio != tp->prio)
			continue;

		/* When adding filters to a new dev, try to offload the
		 * hashtable first. When removing, do the filters before the
		 * hashtable.
		 */
		if (add && !tc_skip_hw(ht->flags)) {
			err = u32_reoffload_hnode(tp, ht, add, cb, cb_priv,
						  extack);
			if (err)
				return err;
		}

		for (h = 0; h <= ht->divisor; h++) {
			for (n = rtnl_dereference(ht->ht[h]);
			     n;
			     n = rtnl_dereference(n->next)) {
				if (tc_skip_hw(n->flags))
					continue;

				err = u32_reoffload_knode(tp, n, add, cb,
							  cb_priv, extack);
				if (err)
					return err;
			}
		}

		if (!add && !tc_skip_hw(ht->flags))
			u32_reoffload_hnode(tp, ht, add, cb, cb_priv, extack);
	}

	return 0;
}

static void u32_bind_class(void *fh, u32 classid, unsigned long cl)
{
	struct tc_u_knode *n = fh;

	if (n && n->res.classid == classid)
		n->res.class = cl;
}

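/* Dump one filter back to userspace.  A handle with a zero key id denotes
 * the hash table itself, which only reports its divisor; a knode dumps its
 * selector, owning/linked tables, flags and (if configured) the nfmark
 * match and per-cpu hit counters summed over all possible CPUs.
 */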
static int u32_dump(struct net *net, struct tcf_proto *tp, void *fh,
		    struct sk_buff *skb, struct tcmsg *t, bool rtnl_held)
{
	struct tc_u_knode *n = fh;
	struct tc_u_hnode *ht_up, *ht_down;
	struct nlattr *nest;

	if (n == NULL)
		return skb->len;

	t->tcm_handle = n->handle;

	nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;

	if (TC_U32_KEY(n->handle) == 0) {
		struct tc_u_hnode *ht = fh;
		u32 divisor = ht->divisor + 1;

		if (nla_put_u32(skb, TCA_U32_DIVISOR, divisor))
			goto nla_put_failure;
	} else {
#ifdef CONFIG_CLS_U32_PERF
		struct tc_u32_pcnt *gpf;
		int cpu;
#endif

		if (nla_put(skb, TCA_U32_SEL,
			    sizeof(n->sel) + n->sel.nkeys*sizeof(struct tc_u32_key),
			    &n->sel))
			goto nla_put_failure;

		ht_up = rtnl_dereference(n->ht_up);
		if (ht_up) {
			u32 htid = n->handle & 0xFFFFF000;
			if (nla_put_u32(skb, TCA_U32_HASH, htid))
				goto nla_put_failure;
		}
		if (n->res.classid &&
		    nla_put_u32(skb, TCA_U32_CLASSID, n->res.classid))
			goto nla_put_failure;

		ht_down = rtnl_dereference(n->ht_down);
		if (ht_down &&
		    nla_put_u32(skb, TCA_U32_LINK, ht_down->handle))
			goto nla_put_failure;

		if (n->flags && nla_put_u32(skb, TCA_U32_FLAGS, n->flags))
			goto nla_put_failure;

#ifdef CONFIG_CLS_U32_MARK
		if ((n->val || n->mask)) {
			struct tc_u32_mark mark = {.val = n->val,
						   .mask = n->mask,
						   .success = 0};
			int cpum;

			for_each_possible_cpu(cpum) {
				__u32 cnt = *per_cpu_ptr(n->pcpu_success, cpum);

				mark.success += cnt;
			}

			if (nla_put(skb, TCA_U32_MARK, sizeof(mark), &mark))
				goto nla_put_failure;
		}
#endif

		if (tcf_exts_dump(skb, &n->exts) < 0)
			goto nla_put_failure;

		if (n->ifindex) {
			struct net_device *dev;
			dev = __dev_get_by_index(net, n->ifindex);
			if (dev && nla_put_string(skb, TCA_U32_INDEV, dev->name))
				goto nla_put_failure;
		}
#ifdef CONFIG_CLS_U32_PERF
		gpf = kzalloc(sizeof(struct tc_u32_pcnt) +
			      n->sel.nkeys * sizeof(u64),
			      GFP_KERNEL);
		if (!gpf)
			goto nla_put_failure;

		for_each_possible_cpu(cpu) {
			int i;
			struct tc_u32_pcnt *pf = per_cpu_ptr(n->pf, cpu);

			gpf->rcnt += pf->rcnt;
			gpf->rhit += pf->rhit;
			for (i = 0; i < n->sel.nkeys; i++)
				gpf->kcnts[i] += pf->kcnts[i];
		}

		if (nla_put_64bit(skb, TCA_U32_PCNT,
				  sizeof(struct tc_u32_pcnt) +
				  n->sel.nkeys * sizeof(u64),
				  gpf, TCA_U32_PAD)) {
			kfree(gpf);
			goto nla_put_failure;
		}
		kfree(gpf);
#endif
	}

	nla_nest_end(skb, nest);

	if (TC_U32_KEY(n->handle))
		if (tcf_exts_dump_stats(skb, &n->exts) < 0)
			goto nla_put_failure;
	return skb->len;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}

static struct tcf_proto_ops cls_u32_ops __read_mostly = {
	.kind		=	"u32",
	.classify	=	u32_classify,
	.init		=	u32_init,
	.destroy	=	u32_destroy,
	.get		=	u32_get,
	.change		=	u32_change,
	.delete		=	u32_delete,
	.walk		=	u32_walk,
	.reoffload	=	u32_reoffload,
	.dump		=	u32_dump,
	.bind_class	=	u32_bind_class,
	.owner		=	THIS_MODULE,
};

static int __init init_u32(void)
{
	int i, ret;

	pr_info("u32 classifier\n");
#ifdef CONFIG_CLS_U32_PERF
	pr_info("    Performance counters on\n");
#endif
	pr_info("    input device check on\n");
#ifdef CONFIG_NET_CLS_ACT
	pr_info("    Actions configured\n");
#endif
	tc_u_common_hash = kvmalloc_array(U32_HASH_SIZE,
					  sizeof(struct hlist_head),
					  GFP_KERNEL);
	if (!tc_u_common_hash)
		return -ENOMEM;

	for (i = 0; i < U32_HASH_SIZE; i++)
		INIT_HLIST_HEAD(&tc_u_common_hash[i]);

	ret = register_tcf_proto_ops(&cls_u32_ops);
	if (ret)
		kvfree(tc_u_common_hash);
	return ret;
}

static void __exit exit_u32(void)
{
	unregister_tcf_proto_ops(&cls_u32_ops);
	kvfree(tc_u_common_hash);
}

module_init(init_u32)
module_exit(exit_u32)
MODULE_LICENSE("GPL");
v5.14.15
   1// SPDX-License-Identifier: GPL-2.0-or-later
   2/*
   3 * net/sched/cls_u32.c	Ugly (or Universal) 32bit key Packet Classifier.
   4 *
   5 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
   6 *
   7 *	The filters are packed to hash tables of key nodes
   8 *	with a set of 32bit key/mask pairs at every node.
   9 *	Nodes reference next level hash tables etc.
  10 *
  11 *	This scheme is the best universal classifier I managed to
  12 *	invent; it is not super-fast, but it is not slow (provided you
  13 *	program it correctly), and general enough.  And its relative
  14 *	speed grows as the number of rules becomes larger.
  15 *
  16 *	It seems that it represents the best middle point between
  17 *	speed and manageability both by human and by machine.
  18 *
  19 *	It is especially useful for link sharing combined with QoS;
  20 *	pure RSVP doesn't need such a general approach and can use
  21 *	much simpler (and faster) schemes, sort of cls_rsvp.c.
  22 *
  23 *	nfmark match added by Catalin(ux aka Dino) BOIE <catab at umbrella.ro>
  24 */
  25
  26#include <linux/module.h>
  27#include <linux/slab.h>
  28#include <linux/types.h>
  29#include <linux/kernel.h>
  30#include <linux/string.h>
  31#include <linux/errno.h>
  32#include <linux/percpu.h>
  33#include <linux/rtnetlink.h>
  34#include <linux/skbuff.h>
  35#include <linux/bitmap.h>
  36#include <linux/netdevice.h>
  37#include <linux/hash.h>
  38#include <net/netlink.h>
  39#include <net/act_api.h>
  40#include <net/pkt_cls.h>
  41#include <linux/idr.h>
  42
  43struct tc_u_knode {
  44	struct tc_u_knode __rcu	*next;
  45	u32			handle;
  46	struct tc_u_hnode __rcu	*ht_up;
  47	struct tcf_exts		exts;
  48	int			ifindex;
  49	u8			fshift;
  50	struct tcf_result	res;
  51	struct tc_u_hnode __rcu	*ht_down;
  52#ifdef CONFIG_CLS_U32_PERF
  53	struct tc_u32_pcnt __percpu *pf;
  54#endif
  55	u32			flags;
  56	unsigned int		in_hw_count;
  57#ifdef CONFIG_CLS_U32_MARK
  58	u32			val;
  59	u32			mask;
  60	u32 __percpu		*pcpu_success;
  61#endif
  62	struct rcu_work		rwork;
  63	/* The 'sel' field MUST be the last field in structure to allow for
  64	 * tc_u32_keys allocated at end of structure.
  65	 */
  66	struct tc_u32_sel	sel;
  67};
  68
  69struct tc_u_hnode {
  70	struct tc_u_hnode __rcu	*next;
  71	u32			handle;
  72	u32			prio;
  73	int			refcnt;
  74	unsigned int		divisor;
  75	struct idr		handle_idr;
  76	bool			is_root;
  77	struct rcu_head		rcu;
  78	u32			flags;
  79	/* The 'ht' field MUST be the last field in structure to allow for
  80	 * more entries allocated at end of structure.
  81	 */
  82	struct tc_u_knode __rcu	*ht[];
  83};
  84
  85struct tc_u_common {
  86	struct tc_u_hnode __rcu	*hlist;
  87	void			*ptr;
  88	int			refcnt;
  89	struct idr		handle_idr;
  90	struct hlist_node	hnode;
  91	long			knodes;
  92};
  93
  94static inline unsigned int u32_hash_fold(__be32 key,
  95					 const struct tc_u32_sel *sel,
  96					 u8 fshift)
  97{
  98	unsigned int h = ntohl(key & sel->hmask) >> fshift;
  99
 100	return h;
 101}
 102
 103static int u32_classify(struct sk_buff *skb, const struct tcf_proto *tp,
 104			struct tcf_result *res)
 105{
 106	struct {
 107		struct tc_u_knode *knode;
 108		unsigned int	  off;
 109	} stack[TC_U32_MAXDEPTH];
 110
 111	struct tc_u_hnode *ht = rcu_dereference_bh(tp->root);
 112	unsigned int off = skb_network_offset(skb);
 113	struct tc_u_knode *n;
 114	int sdepth = 0;
 115	int off2 = 0;
 116	int sel = 0;
 117#ifdef CONFIG_CLS_U32_PERF
 118	int j;
 119#endif
 120	int i, r;
 121
 122next_ht:
 123	n = rcu_dereference_bh(ht->ht[sel]);
 124
 125next_knode:
 126	if (n) {
 127		struct tc_u32_key *key = n->sel.keys;
 128
 129#ifdef CONFIG_CLS_U32_PERF
 130		__this_cpu_inc(n->pf->rcnt);
 131		j = 0;
 132#endif
 133
 134		if (tc_skip_sw(n->flags)) {
 135			n = rcu_dereference_bh(n->next);
 136			goto next_knode;
 137		}
 138
 139#ifdef CONFIG_CLS_U32_MARK
 140		if ((skb->mark & n->mask) != n->val) {
 141			n = rcu_dereference_bh(n->next);
 142			goto next_knode;
 143		} else {
 144			__this_cpu_inc(*n->pcpu_success);
 145		}
 146#endif
 147
 148		for (i = n->sel.nkeys; i > 0; i--, key++) {
 149			int toff = off + key->off + (off2 & key->offmask);
 150			__be32 *data, hdata;
 151
 152			if (skb_headroom(skb) + toff > INT_MAX)
 153				goto out;
 154
 155			data = skb_header_pointer(skb, toff, 4, &hdata);
 156			if (!data)
 157				goto out;
 158			if ((*data ^ key->val) & key->mask) {
 159				n = rcu_dereference_bh(n->next);
 160				goto next_knode;
 161			}
 162#ifdef CONFIG_CLS_U32_PERF
 163			__this_cpu_inc(n->pf->kcnts[j]);
 164			j++;
 165#endif
 166		}
 167
 168		ht = rcu_dereference_bh(n->ht_down);
 169		if (!ht) {
 170check_terminal:
 171			if (n->sel.flags & TC_U32_TERMINAL) {
 172
 173				*res = n->res;
 174				if (!tcf_match_indev(skb, n->ifindex)) {
 175					n = rcu_dereference_bh(n->next);
 176					goto next_knode;
 177				}
 178#ifdef CONFIG_CLS_U32_PERF
 179				__this_cpu_inc(n->pf->rhit);
 180#endif
 181				r = tcf_exts_exec(skb, &n->exts, res);
 182				if (r < 0) {
 183					n = rcu_dereference_bh(n->next);
 184					goto next_knode;
 185				}
 186
 187				return r;
 188			}
 189			n = rcu_dereference_bh(n->next);
 190			goto next_knode;
 191		}
 192
 193		/* PUSH */
 194		if (sdepth >= TC_U32_MAXDEPTH)
 195			goto deadloop;
 196		stack[sdepth].knode = n;
 197		stack[sdepth].off = off;
 198		sdepth++;
 199
 200		ht = rcu_dereference_bh(n->ht_down);
 201		sel = 0;
 202		if (ht->divisor) {
 203			__be32 *data, hdata;
 204
 205			data = skb_header_pointer(skb, off + n->sel.hoff, 4,
 206						  &hdata);
 207			if (!data)
 208				goto out;
 209			sel = ht->divisor & u32_hash_fold(*data, &n->sel,
 210							  n->fshift);
 211		}
 212		if (!(n->sel.flags & (TC_U32_VAROFFSET | TC_U32_OFFSET | TC_U32_EAT)))
 213			goto next_ht;
 214
 215		if (n->sel.flags & (TC_U32_OFFSET | TC_U32_VAROFFSET)) {
 216			off2 = n->sel.off + 3;
 217			if (n->sel.flags & TC_U32_VAROFFSET) {
 218				__be16 *data, hdata;
 219
 220				data = skb_header_pointer(skb,
 221							  off + n->sel.offoff,
 222							  2, &hdata);
 223				if (!data)
 224					goto out;
 225				off2 += ntohs(n->sel.offmask & *data) >>
 226					n->sel.offshift;
 227			}
 228			off2 &= ~3;
 229		}
 230		if (n->sel.flags & TC_U32_EAT) {
 231			off += off2;
 232			off2 = 0;
 233		}
 234
 235		if (off < skb->len)
 236			goto next_ht;
 237	}
 238
 239	/* POP */
 240	if (sdepth--) {
 241		n = stack[sdepth].knode;
 242		ht = rcu_dereference_bh(n->ht_up);
 243		off = stack[sdepth].off;
 244		goto check_terminal;
 245	}
 246out:
 247	return -1;
 248
 249deadloop:
 250	net_warn_ratelimited("cls_u32: dead loop\n");
 251	return -1;
 252}
 253
 254static struct tc_u_hnode *u32_lookup_ht(struct tc_u_common *tp_c, u32 handle)
 255{
 256	struct tc_u_hnode *ht;
 257
 258	for (ht = rtnl_dereference(tp_c->hlist);
 259	     ht;
 260	     ht = rtnl_dereference(ht->next))
 261		if (ht->handle == handle)
 262			break;
 263
 264	return ht;
 265}
 266
 267static struct tc_u_knode *u32_lookup_key(struct tc_u_hnode *ht, u32 handle)
 268{
 269	unsigned int sel;
 270	struct tc_u_knode *n = NULL;
 271
 272	sel = TC_U32_HASH(handle);
 273	if (sel > ht->divisor)
 274		goto out;
 275
 276	for (n = rtnl_dereference(ht->ht[sel]);
 277	     n;
 278	     n = rtnl_dereference(n->next))
 279		if (n->handle == handle)
 280			break;
 281out:
 282	return n;
 283}
 284
 285
 286static void *u32_get(struct tcf_proto *tp, u32 handle)
 287{
 288	struct tc_u_hnode *ht;
 289	struct tc_u_common *tp_c = tp->data;
 290
 291	if (TC_U32_HTID(handle) == TC_U32_ROOT)
 292		ht = rtnl_dereference(tp->root);
 293	else
 294		ht = u32_lookup_ht(tp_c, TC_U32_HTID(handle));
 295
 296	if (!ht)
 297		return NULL;
 298
 299	if (TC_U32_KEY(handle) == 0)
 300		return ht;
 301
 302	return u32_lookup_key(ht, handle);
 303}
 304
 305/* Protected by rtnl lock */
 306static u32 gen_new_htid(struct tc_u_common *tp_c, struct tc_u_hnode *ptr)
 307{
 308	int id = idr_alloc_cyclic(&tp_c->handle_idr, ptr, 1, 0x7FF, GFP_KERNEL);
 309	if (id < 0)
 310		return 0;
 311	return (id | 0x800U) << 20;
 312}
 313
 314static struct hlist_head *tc_u_common_hash;
 315
 316#define U32_HASH_SHIFT 10
 317#define U32_HASH_SIZE (1 << U32_HASH_SHIFT)
 318
 319static void *tc_u_common_ptr(const struct tcf_proto *tp)
 320{
 321	struct tcf_block *block = tp->chain->block;
 322
 323	/* The block sharing is currently supported only
 324	 * for classless qdiscs. In that case we use block
 325	 * for tc_u_common identification. In case the
 326	 * block is not shared, block->q is a valid pointer
 327	 * and we can use that. That works for classful qdiscs.
 328	 */
 329	if (tcf_block_shared(block))
 330		return block;
 331	else
 332		return block->q;
 333}
 334
 335static struct hlist_head *tc_u_hash(void *key)
 336{
 337	return tc_u_common_hash + hash_ptr(key, U32_HASH_SHIFT);
 338}
 339
 340static struct tc_u_common *tc_u_common_find(void *key)
 341{
 342	struct tc_u_common *tc;
 343	hlist_for_each_entry(tc, tc_u_hash(key), hnode) {
 344		if (tc->ptr == key)
 345			return tc;
 346	}
 347	return NULL;
 348}
 349
 350static int u32_init(struct tcf_proto *tp)
 351{
 352	struct tc_u_hnode *root_ht;
 353	void *key = tc_u_common_ptr(tp);
 354	struct tc_u_common *tp_c = tc_u_common_find(key);
 355
 356	root_ht = kzalloc(struct_size(root_ht, ht, 1), GFP_KERNEL);
 357	if (root_ht == NULL)
 358		return -ENOBUFS;
 359
 360	root_ht->refcnt++;
 361	root_ht->handle = tp_c ? gen_new_htid(tp_c, root_ht) : 0x80000000;
 362	root_ht->prio = tp->prio;
 363	root_ht->is_root = true;
 364	idr_init(&root_ht->handle_idr);
 365
 366	if (tp_c == NULL) {
 367		tp_c = kzalloc(struct_size(tp_c, hlist->ht, 1), GFP_KERNEL);
 368		if (tp_c == NULL) {
 369			kfree(root_ht);
 370			return -ENOBUFS;
 371		}
 372		tp_c->ptr = key;
 373		INIT_HLIST_NODE(&tp_c->hnode);
 374		idr_init(&tp_c->handle_idr);
 375
 376		hlist_add_head(&tp_c->hnode, tc_u_hash(key));
 377	}
 378
 379	tp_c->refcnt++;
 380	RCU_INIT_POINTER(root_ht->next, tp_c->hlist);
 381	rcu_assign_pointer(tp_c->hlist, root_ht);
 382
 383	root_ht->refcnt++;
 384	rcu_assign_pointer(tp->root, root_ht);
 385	tp->data = tp_c;
 386	return 0;
 387}
 388
 389static int u32_destroy_key(struct tc_u_knode *n, bool free_pf)
 390{
 391	struct tc_u_hnode *ht = rtnl_dereference(n->ht_down);
 392
 393	tcf_exts_destroy(&n->exts);
 394	tcf_exts_put_net(&n->exts);
 395	if (ht && --ht->refcnt == 0)
 396		kfree(ht);
 397#ifdef CONFIG_CLS_U32_PERF
 398	if (free_pf)
 399		free_percpu(n->pf);
 400#endif
 401#ifdef CONFIG_CLS_U32_MARK
 402	if (free_pf)
 403		free_percpu(n->pcpu_success);
 404#endif
 405	kfree(n);
 406	return 0;
 407}
 408
 409/* u32_delete_key_rcu should be called when free'ing a copied
 410 * version of a tc_u_knode obtained from u32_init_knode(). When
 411 * copies are obtained from u32_init_knode() the statistics are
 412 * shared between the old and new copies to allow readers to
 413 * continue to update the statistics during the copy. To support
 414 * this the u32_delete_key_rcu variant does not free the percpu
 415 * statistics.
 416 */
 417static void u32_delete_key_work(struct work_struct *work)
 418{
 419	struct tc_u_knode *key = container_of(to_rcu_work(work),
 420					      struct tc_u_knode,
 421					      rwork);
 422	rtnl_lock();
 423	u32_destroy_key(key, false);
 424	rtnl_unlock();
 425}
 426
 427/* u32_delete_key_freepf_rcu is the rcu callback variant
 428 * that free's the entire structure including the statistics
 429 * percpu variables. Only use this if the key is not a copy
 430 * returned by u32_init_knode(). See u32_delete_key_rcu()
 431 * for the variant that should be used with keys return from
 432 * u32_init_knode()
 433 */
 434static void u32_delete_key_freepf_work(struct work_struct *work)
 435{
 436	struct tc_u_knode *key = container_of(to_rcu_work(work),
 437					      struct tc_u_knode,
 438					      rwork);
 439	rtnl_lock();
 440	u32_destroy_key(key, true);
 441	rtnl_unlock();
 442}
 443
 444static int u32_delete_key(struct tcf_proto *tp, struct tc_u_knode *key)
 445{
 446	struct tc_u_common *tp_c = tp->data;
 447	struct tc_u_knode __rcu **kp;
 448	struct tc_u_knode *pkp;
 449	struct tc_u_hnode *ht = rtnl_dereference(key->ht_up);
 450
 451	if (ht) {
 452		kp = &ht->ht[TC_U32_HASH(key->handle)];
 453		for (pkp = rtnl_dereference(*kp); pkp;
 454		     kp = &pkp->next, pkp = rtnl_dereference(*kp)) {
 455			if (pkp == key) {
 456				RCU_INIT_POINTER(*kp, key->next);
 457				tp_c->knodes--;
 458
 459				tcf_unbind_filter(tp, &key->res);
 460				idr_remove(&ht->handle_idr, key->handle);
 461				tcf_exts_get_net(&key->exts);
 462				tcf_queue_work(&key->rwork, u32_delete_key_freepf_work);
 463				return 0;
 464			}
 465		}
 466	}
 467	WARN_ON(1);
 468	return 0;
 469}
 470
 471static void u32_clear_hw_hnode(struct tcf_proto *tp, struct tc_u_hnode *h,
 472			       struct netlink_ext_ack *extack)
 473{
 474	struct tcf_block *block = tp->chain->block;
 475	struct tc_cls_u32_offload cls_u32 = {};
 476
 477	tc_cls_common_offload_init(&cls_u32.common, tp, h->flags, extack);
 478	cls_u32.command = TC_CLSU32_DELETE_HNODE;
 479	cls_u32.hnode.divisor = h->divisor;
 480	cls_u32.hnode.handle = h->handle;
 481	cls_u32.hnode.prio = h->prio;
 482
 483	tc_setup_cb_call(block, TC_SETUP_CLSU32, &cls_u32, false, true);
 484}
 485
 486static int u32_replace_hw_hnode(struct tcf_proto *tp, struct tc_u_hnode *h,
 487				u32 flags, struct netlink_ext_ack *extack)
 488{
 489	struct tcf_block *block = tp->chain->block;
 490	struct tc_cls_u32_offload cls_u32 = {};
 491	bool skip_sw = tc_skip_sw(flags);
 492	bool offloaded = false;
 493	int err;
 494
 495	tc_cls_common_offload_init(&cls_u32.common, tp, flags, extack);
 496	cls_u32.command = TC_CLSU32_NEW_HNODE;
 497	cls_u32.hnode.divisor = h->divisor;
 498	cls_u32.hnode.handle = h->handle;
 499	cls_u32.hnode.prio = h->prio;
 500
 501	err = tc_setup_cb_call(block, TC_SETUP_CLSU32, &cls_u32, skip_sw, true);
 502	if (err < 0) {
 503		u32_clear_hw_hnode(tp, h, NULL);
 504		return err;
 505	} else if (err > 0) {
 506		offloaded = true;
 507	}
 508
 509	if (skip_sw && !offloaded)
 510		return -EINVAL;
 511
 512	return 0;
 513}
 514
 515static void u32_remove_hw_knode(struct tcf_proto *tp, struct tc_u_knode *n,
 516				struct netlink_ext_ack *extack)
 517{
 518	struct tcf_block *block = tp->chain->block;
 519	struct tc_cls_u32_offload cls_u32 = {};
 520
 521	tc_cls_common_offload_init(&cls_u32.common, tp, n->flags, extack);
 522	cls_u32.command = TC_CLSU32_DELETE_KNODE;
 523	cls_u32.knode.handle = n->handle;
 524
 525	tc_setup_cb_destroy(block, tp, TC_SETUP_CLSU32, &cls_u32, false,
 526			    &n->flags, &n->in_hw_count, true);
 527}
 528
 529static int u32_replace_hw_knode(struct tcf_proto *tp, struct tc_u_knode *n,
 530				u32 flags, struct netlink_ext_ack *extack)
 531{
 532	struct tc_u_hnode *ht = rtnl_dereference(n->ht_down);
 533	struct tcf_block *block = tp->chain->block;
 534	struct tc_cls_u32_offload cls_u32 = {};
 535	bool skip_sw = tc_skip_sw(flags);
 536	int err;
 537
 538	tc_cls_common_offload_init(&cls_u32.common, tp, flags, extack);
 539	cls_u32.command = TC_CLSU32_REPLACE_KNODE;
 540	cls_u32.knode.handle = n->handle;
 541	cls_u32.knode.fshift = n->fshift;
 542#ifdef CONFIG_CLS_U32_MARK
 543	cls_u32.knode.val = n->val;
 544	cls_u32.knode.mask = n->mask;
 545#else
 546	cls_u32.knode.val = 0;
 547	cls_u32.knode.mask = 0;
 548#endif
 549	cls_u32.knode.sel = &n->sel;
 550	cls_u32.knode.res = &n->res;
 551	cls_u32.knode.exts = &n->exts;
 552	if (n->ht_down)
 553		cls_u32.knode.link_handle = ht->handle;
 554
 555	err = tc_setup_cb_add(block, tp, TC_SETUP_CLSU32, &cls_u32, skip_sw,
 556			      &n->flags, &n->in_hw_count, true);
 557	if (err) {
 558		u32_remove_hw_knode(tp, n, NULL);
 559		return err;
 560	}
 561
 562	if (skip_sw && !(n->flags & TCA_CLS_FLAGS_IN_HW))
 563		return -EINVAL;
 564
 565	return 0;
 566}
 567
 568static void u32_clear_hnode(struct tcf_proto *tp, struct tc_u_hnode *ht,
 569			    struct netlink_ext_ack *extack)
 570{
 571	struct tc_u_common *tp_c = tp->data;
 572	struct tc_u_knode *n;
 573	unsigned int h;
 574
 575	for (h = 0; h <= ht->divisor; h++) {
 576		while ((n = rtnl_dereference(ht->ht[h])) != NULL) {
 577			RCU_INIT_POINTER(ht->ht[h],
 578					 rtnl_dereference(n->next));
 579			tp_c->knodes--;
 580			tcf_unbind_filter(tp, &n->res);
 581			u32_remove_hw_knode(tp, n, extack);
 582			idr_remove(&ht->handle_idr, n->handle);
 583			if (tcf_exts_get_net(&n->exts))
 584				tcf_queue_work(&n->rwork, u32_delete_key_freepf_work);
 585			else
 586				u32_destroy_key(n, true);
 587		}
 588	}
 589}
 590
 591static int u32_destroy_hnode(struct tcf_proto *tp, struct tc_u_hnode *ht,
 592			     struct netlink_ext_ack *extack)
 593{
 594	struct tc_u_common *tp_c = tp->data;
 595	struct tc_u_hnode __rcu **hn;
 596	struct tc_u_hnode *phn;
 597
 598	WARN_ON(--ht->refcnt);
 599
 600	u32_clear_hnode(tp, ht, extack);
 601
 602	hn = &tp_c->hlist;
 603	for (phn = rtnl_dereference(*hn);
 604	     phn;
 605	     hn = &phn->next, phn = rtnl_dereference(*hn)) {
 606		if (phn == ht) {
 607			u32_clear_hw_hnode(tp, ht, extack);
 608			idr_destroy(&ht->handle_idr);
 609			idr_remove(&tp_c->handle_idr, ht->handle);
 610			RCU_INIT_POINTER(*hn, ht->next);
 611			kfree_rcu(ht, rcu);
 612			return 0;
 613		}
 614	}
 615
 616	return -ENOENT;
 617}
 618
 619static void u32_destroy(struct tcf_proto *tp, bool rtnl_held,
 620			struct netlink_ext_ack *extack)
 621{
 622	struct tc_u_common *tp_c = tp->data;
 623	struct tc_u_hnode *root_ht = rtnl_dereference(tp->root);
 624
 625	WARN_ON(root_ht == NULL);
 626
 627	if (root_ht && --root_ht->refcnt == 1)
 628		u32_destroy_hnode(tp, root_ht, extack);
 629
 630	if (--tp_c->refcnt == 0) {
 631		struct tc_u_hnode *ht;
 632
 633		hlist_del(&tp_c->hnode);
 634
 635		while ((ht = rtnl_dereference(tp_c->hlist)) != NULL) {
 636			u32_clear_hnode(tp, ht, extack);
 637			RCU_INIT_POINTER(tp_c->hlist, ht->next);
 638
 639			/* u32_destroy_key() will later free ht for us, if it's
 640			 * still referenced by some knode
 641			 */
 642			if (--ht->refcnt == 0)
 643				kfree_rcu(ht, rcu);
 644		}
 645
 646		idr_destroy(&tp_c->handle_idr);
 647		kfree(tp_c);
 648	}
 649
 650	tp->data = NULL;
 651}
 652
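	/* Deletion dispatch: a handle with a non-zero TC_U32_KEY() part
	 * names a key node, anything else is a hash table.  The root table
	 * cannot be deleted, and a table still linked to by key nodes
	 * (refcnt > 1) returns -EBUSY.  *last tells the caller whether the
	 * classifier instance is now empty.
	 */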
 653static int u32_delete(struct tcf_proto *tp, void *arg, bool *last,
 654		      bool rtnl_held, struct netlink_ext_ack *extack)
 655{
 656	struct tc_u_hnode *ht = arg;
 657	struct tc_u_common *tp_c = tp->data;
 658	int ret = 0;
 659
 660	if (TC_U32_KEY(ht->handle)) {
 661		u32_remove_hw_knode(tp, (struct tc_u_knode *)ht, extack);
 662		ret = u32_delete_key(tp, (struct tc_u_knode *)ht);
 663		goto out;
 664	}
 665
 666	if (ht->is_root) {
 667		NL_SET_ERR_MSG_MOD(extack, "Not allowed to delete root node");
 668		return -EINVAL;
 669	}
 670
 671	if (ht->refcnt == 1) {
 672		u32_destroy_hnode(tp, ht, extack);
 673	} else {
 674		NL_SET_ERR_MSG_MOD(extack, "Can not delete in-use filter");
 675		return -EBUSY;
 676	}
 677
 678out:
 679	*last = tp_c->refcnt == 1 && tp_c->knodes == 0;
 680	return ret;
 681}
 682
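	/* Pick an unused key-node id inside hash table 'ht'.  The upper id
	 * range htid|0x800 .. htid|0xFFF is tried first, then anything from
	 * htid+1 up; e.g. with htid 0x00100000 the first candidate handle
	 * is 0x00100800.  As a last resort the maximum id is handed back.
	 */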
 683static u32 gen_new_kid(struct tc_u_hnode *ht, u32 htid)
 684{
 685	u32 index = htid | 0x800;
 686	u32 max = htid | 0xFFF;
 687
 688	if (idr_alloc_u32(&ht->handle_idr, NULL, &index, max, GFP_KERNEL)) {
 689		index = htid + 1;
 690		if (idr_alloc_u32(&ht->handle_idr, NULL, &index, max,
 691				 GFP_KERNEL))
 692			index = max;
 693	}
 694
 695	return index;
 696}
 697
 698static const struct nla_policy u32_policy[TCA_U32_MAX + 1] = {
 699	[TCA_U32_CLASSID]	= { .type = NLA_U32 },
 700	[TCA_U32_HASH]		= { .type = NLA_U32 },
 701	[TCA_U32_LINK]		= { .type = NLA_U32 },
 702	[TCA_U32_DIVISOR]	= { .type = NLA_U32 },
 703	[TCA_U32_SEL]		= { .len = sizeof(struct tc_u32_sel) },
 704	[TCA_U32_INDEV]		= { .type = NLA_STRING, .len = IFNAMSIZ },
 705	[TCA_U32_MARK]		= { .len = sizeof(struct tc_u32_mark) },
 706	[TCA_U32_FLAGS]		= { .type = NLA_U32 },
 707};
 708
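	/* Apply the per-node attributes: TCA_U32_LINK chains a node to
	 * another (non-root) hash table, TCA_U32_CLASSID binds the match
	 * result to a class, TCA_U32_INDEV matches the incoming device.
	 * An illustrative iproute2 command (exact syntax may vary):
	 *
	 *   tc filter add dev eth0 parent 1: prio 10 u32 \
	 *	match ip dst 192.168.0.0/16 flowid 1:2
	 *
	 * which carries TCA_U32_SEL (one key) plus TCA_U32_CLASSID.
	 */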
 709static int u32_set_parms(struct net *net, struct tcf_proto *tp,
 710			 unsigned long base,
 711			 struct tc_u_knode *n, struct nlattr **tb,
 712			 struct nlattr *est, bool ovr,
 713			 struct netlink_ext_ack *extack)
 714{
 715	int err;
 716
 717	err = tcf_exts_validate(net, tp, tb, est, &n->exts, ovr, true, extack);
 718	if (err < 0)
 719		return err;
 720
 721	if (tb[TCA_U32_LINK]) {
 722		u32 handle = nla_get_u32(tb[TCA_U32_LINK]);
 723		struct tc_u_hnode *ht_down = NULL, *ht_old;
 724
 725		if (TC_U32_KEY(handle)) {
 726			NL_SET_ERR_MSG_MOD(extack, "u32 Link handle must be a hash table");
 727			return -EINVAL;
 728		}
 729
 730		if (handle) {
 731			ht_down = u32_lookup_ht(tp->data, handle);
 732
 733			if (!ht_down) {
 734				NL_SET_ERR_MSG_MOD(extack, "Link hash table not found");
 735				return -EINVAL;
 736			}
 737			if (ht_down->is_root) {
 738				NL_SET_ERR_MSG_MOD(extack, "Not linking to root node");
 739				return -EINVAL;
 740			}
 741			ht_down->refcnt++;
 742		}
 743
 744		ht_old = rtnl_dereference(n->ht_down);
 745		rcu_assign_pointer(n->ht_down, ht_down);
 746
 747		if (ht_old)
 748			ht_old->refcnt--;
 749	}
 750	if (tb[TCA_U32_CLASSID]) {
 751		n->res.classid = nla_get_u32(tb[TCA_U32_CLASSID]);
 752		tcf_bind_filter(tp, &n->res, base);
 753	}
 754
 755	if (tb[TCA_U32_INDEV]) {
 756		int ret;
 757		ret = tcf_change_indev(net, tb[TCA_U32_INDEV], extack);
 758		if (ret < 0)
 759			return -EINVAL;
 760		n->ifindex = ret;
 761	}
 762	return 0;
 763}
 764
 765static void u32_replace_knode(struct tcf_proto *tp, struct tc_u_common *tp_c,
 766			      struct tc_u_knode *n)
 767{
 768	struct tc_u_knode __rcu **ins;
 769	struct tc_u_knode *pins;
 770	struct tc_u_hnode *ht;
 771
 772	if (TC_U32_HTID(n->handle) == TC_U32_ROOT)
 773		ht = rtnl_dereference(tp->root);
 774	else
 775		ht = u32_lookup_ht(tp_c, TC_U32_HTID(n->handle));
 776
 777	ins = &ht->ht[TC_U32_HASH(n->handle)];
 778
  779	/* The node must always exist for it to be replaced; if this is not the
  780	 * case, something went very wrong elsewhere.
  781	 */
 782	for (pins = rtnl_dereference(*ins); ;
 783	     ins = &pins->next, pins = rtnl_dereference(*ins))
 784		if (pins->handle == n->handle)
 785			break;
 786
 787	idr_replace(&ht->handle_idr, n, n->handle);
 788	RCU_INIT_POINTER(n->next, pins->next);
 789	rcu_assign_pointer(*ins, n);
 790}
 791
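	/* Make a writable copy of a key node for an RCU-safe replace.
	 * Scalars are duplicated, but the per-cpu statistics pointers are
	 * moved to the new node so counters keep counting while both copies
	 * are reachable; the old node must then be freed by a destroy call
	 * that leaves the pf memory alone (see u32_change()).
	 */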
 792static struct tc_u_knode *u32_init_knode(struct net *net, struct tcf_proto *tp,
 793					 struct tc_u_knode *n)
 794{
 795	struct tc_u_hnode *ht = rtnl_dereference(n->ht_down);
 796	struct tc_u32_sel *s = &n->sel;
 797	struct tc_u_knode *new;
 798
 799	new = kzalloc(struct_size(new, sel.keys, s->nkeys), GFP_KERNEL);
 800	if (!new)
 801		return NULL;
 802
 803	RCU_INIT_POINTER(new->next, n->next);
 804	new->handle = n->handle;
 805	RCU_INIT_POINTER(new->ht_up, n->ht_up);
 806
 807	new->ifindex = n->ifindex;
 808	new->fshift = n->fshift;
 809	new->res = n->res;
 810	new->flags = n->flags;
 811	RCU_INIT_POINTER(new->ht_down, ht);
 812
 813	/* bump reference count as long as we hold pointer to structure */
 814	if (ht)
 815		ht->refcnt++;
 816
 817#ifdef CONFIG_CLS_U32_PERF
  818	/* Statistics may be incremented by readers during update,
  819	 * so we must keep them intact. When the node is later destroyed,
  820	 * a special destroy call must be made so as not to free the pf memory.
  821	 */
 822	new->pf = n->pf;
 823#endif
 824
 825#ifdef CONFIG_CLS_U32_MARK
 826	new->val = n->val;
 827	new->mask = n->mask;
  828	/* Similarly, success statistics must be moved as pointers */
 829	new->pcpu_success = n->pcpu_success;
 830#endif
 831	memcpy(&new->sel, s, struct_size(s, keys, s->nkeys));
 832
 833	if (tcf_exts_init(&new->exts, net, TCA_U32_ACT, TCA_U32_POLICE)) {
 834		kfree(new);
 835		return NULL;
 836	}
 837
 838	return new;
 839}
 840
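	/* Create or update a filter.  Three cases, handled in order: an
	 * existing key node (*arg set) is replaced via copy + RCU swap; a
	 * TCA_U32_DIVISOR attribute creates a new hash table; otherwise a
	 * new key node is inserted into the table chosen by TCA_U32_HASH
	 * (the root table by default), each bucket kept sorted by node id.
	 */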
 841static int u32_change(struct net *net, struct sk_buff *in_skb,
 842		      struct tcf_proto *tp, unsigned long base, u32 handle,
 843		      struct nlattr **tca, void **arg, bool ovr, bool rtnl_held,
 844		      struct netlink_ext_ack *extack)
 845{
 846	struct tc_u_common *tp_c = tp->data;
 847	struct tc_u_hnode *ht;
 848	struct tc_u_knode *n;
 849	struct tc_u32_sel *s;
 850	struct nlattr *opt = tca[TCA_OPTIONS];
 851	struct nlattr *tb[TCA_U32_MAX + 1];
 852	u32 htid, flags = 0;
 853	size_t sel_size;
 854	int err;
 855
 856	if (!opt) {
 857		if (handle) {
 858			NL_SET_ERR_MSG_MOD(extack, "Filter handle requires options");
 859			return -EINVAL;
 860		} else {
 861			return 0;
 862		}
 863	}
 864
 865	err = nla_parse_nested_deprecated(tb, TCA_U32_MAX, opt, u32_policy,
 866					  extack);
 867	if (err < 0)
 868		return err;
 869
 870	if (tb[TCA_U32_FLAGS]) {
 871		flags = nla_get_u32(tb[TCA_U32_FLAGS]);
 872		if (!tc_flags_valid(flags)) {
 873			NL_SET_ERR_MSG_MOD(extack, "Invalid filter flags");
 874			return -EINVAL;
 875		}
 876	}
 877
 878	n = *arg;
 879	if (n) {
 880		struct tc_u_knode *new;
 881
 882		if (TC_U32_KEY(n->handle) == 0) {
 883			NL_SET_ERR_MSG_MOD(extack, "Key node id cannot be zero");
 884			return -EINVAL;
 885		}
 886
 887		if ((n->flags ^ flags) &
 888		    ~(TCA_CLS_FLAGS_IN_HW | TCA_CLS_FLAGS_NOT_IN_HW)) {
 889			NL_SET_ERR_MSG_MOD(extack, "Key node flags do not match passed flags");
 890			return -EINVAL;
 891		}
 892
 893		new = u32_init_knode(net, tp, n);
 894		if (!new)
 895			return -ENOMEM;
 896
 897		err = u32_set_parms(net, tp, base, new, tb,
 898				    tca[TCA_RATE], ovr, extack);
 899
 900		if (err) {
 901			u32_destroy_key(new, false);
 902			return err;
 903		}
 904
 905		err = u32_replace_hw_knode(tp, new, flags, extack);
 906		if (err) {
 907			u32_destroy_key(new, false);
 908			return err;
 909		}
 910
 911		if (!tc_in_hw(new->flags))
 912			new->flags |= TCA_CLS_FLAGS_NOT_IN_HW;
 913
 914		u32_replace_knode(tp, tp_c, new);
 915		tcf_unbind_filter(tp, &n->res);
 916		tcf_exts_get_net(&n->exts);
 917		tcf_queue_work(&n->rwork, u32_delete_key_work);
 918		return 0;
 919	}
 920
 921	if (tb[TCA_U32_DIVISOR]) {
 922		unsigned int divisor = nla_get_u32(tb[TCA_U32_DIVISOR]);
 923
 924		if (!is_power_of_2(divisor)) {
 925			NL_SET_ERR_MSG_MOD(extack, "Divisor is not a power of 2");
 926			return -EINVAL;
 927		}
 928		if (divisor-- > 0x100) {
 929			NL_SET_ERR_MSG_MOD(extack, "Exceeded maximum 256 hash buckets");
 930			return -EINVAL;
 931		}
 932		if (TC_U32_KEY(handle)) {
 933			NL_SET_ERR_MSG_MOD(extack, "Divisor can only be used on a hash table");
 934			return -EINVAL;
 935		}
 936		ht = kzalloc(struct_size(ht, ht, divisor + 1), GFP_KERNEL);
 937		if (ht == NULL)
 938			return -ENOBUFS;
 939		if (handle == 0) {
 940			handle = gen_new_htid(tp->data, ht);
 941			if (handle == 0) {
 942				kfree(ht);
 943				return -ENOMEM;
 944			}
 945		} else {
 946			err = idr_alloc_u32(&tp_c->handle_idr, ht, &handle,
 947					    handle, GFP_KERNEL);
 948			if (err) {
 949				kfree(ht);
 950				return err;
 951			}
 952		}
 953		ht->refcnt = 1;
 954		ht->divisor = divisor;
 955		ht->handle = handle;
 956		ht->prio = tp->prio;
 957		idr_init(&ht->handle_idr);
 958		ht->flags = flags;
 959
 960		err = u32_replace_hw_hnode(tp, ht, flags, extack);
 961		if (err) {
 962			idr_remove(&tp_c->handle_idr, handle);
 963			kfree(ht);
 964			return err;
 965		}
 966
 967		RCU_INIT_POINTER(ht->next, tp_c->hlist);
 968		rcu_assign_pointer(tp_c->hlist, ht);
 969		*arg = ht;
 970
 971		return 0;
 972	}
 973
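		/* Resolve the target hash table: TCA_U32_HASH names one
		 * explicitly (TC_U32_ROOT aliases the root table); without
		 * the attribute the root table is used.
		 */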
 974	if (tb[TCA_U32_HASH]) {
 975		htid = nla_get_u32(tb[TCA_U32_HASH]);
 976		if (TC_U32_HTID(htid) == TC_U32_ROOT) {
 977			ht = rtnl_dereference(tp->root);
 978			htid = ht->handle;
 979		} else {
 980			ht = u32_lookup_ht(tp->data, TC_U32_HTID(htid));
 981			if (!ht) {
 982				NL_SET_ERR_MSG_MOD(extack, "Specified hash table not found");
 983				return -EINVAL;
 984			}
 985		}
 986	} else {
 987		ht = rtnl_dereference(tp->root);
 988		htid = ht->handle;
 989	}
 990
 991	if (ht->divisor < TC_U32_HASH(htid)) {
 992		NL_SET_ERR_MSG_MOD(extack, "Specified hash table buckets exceed configured value");
 993		return -EINVAL;
 994	}
 995
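		/* A u32 handle packs three fields: hash table id in the top
		 * 12 bits, bucket in the next 8, node id in the low 12;
		 * e.g. table 0x001, bucket 0, node 0x801 -> 0x00100801.
		 * A caller-chosen handle may only name the table resolved
		 * above.
		 */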
 996	if (handle) {
 997		if (TC_U32_HTID(handle) && TC_U32_HTID(handle ^ htid)) {
 998			NL_SET_ERR_MSG_MOD(extack, "Handle specified hash table address mismatch");
 999			return -EINVAL;
1000		}
1001		handle = htid | TC_U32_NODE(handle);
1002		err = idr_alloc_u32(&ht->handle_idr, NULL, &handle, handle,
1003				    GFP_KERNEL);
1004		if (err)
1005			return err;
1006	} else
1007		handle = gen_new_kid(ht, htid);
1008
1009	if (tb[TCA_U32_SEL] == NULL) {
1010		NL_SET_ERR_MSG_MOD(extack, "Selector not specified");
1011		err = -EINVAL;
1012		goto erridr;
1013	}
1014
1015	s = nla_data(tb[TCA_U32_SEL]);
1016	sel_size = struct_size(s, keys, s->nkeys);
1017	if (nla_len(tb[TCA_U32_SEL]) < sel_size) {
1018		err = -EINVAL;
1019		goto erridr;
1020	}
1021
1022	n = kzalloc(struct_size(n, sel.keys, s->nkeys), GFP_KERNEL);
1023	if (n == NULL) {
1024		err = -ENOBUFS;
1025		goto erridr;
1026	}
1027
1028#ifdef CONFIG_CLS_U32_PERF
1029	n->pf = __alloc_percpu(struct_size(n->pf, kcnts, s->nkeys),
1030			       __alignof__(struct tc_u32_pcnt));
1031	if (!n->pf) {
1032		err = -ENOBUFS;
1033		goto errfree;
1034	}
1035#endif
1036
1037	memcpy(&n->sel, s, sel_size);
1038	RCU_INIT_POINTER(n->ht_up, ht);
1039	n->handle = handle;
1040	n->fshift = s->hmask ? ffs(ntohl(s->hmask)) - 1 : 0;
1041	n->flags = flags;
1042
1043	err = tcf_exts_init(&n->exts, net, TCA_U32_ACT, TCA_U32_POLICE);
1044	if (err < 0)
1045		goto errout;
1046
1047#ifdef CONFIG_CLS_U32_MARK
1048	n->pcpu_success = alloc_percpu(u32);
1049	if (!n->pcpu_success) {
1050		err = -ENOMEM;
1051		goto errout;
1052	}
1053
1054	if (tb[TCA_U32_MARK]) {
1055		struct tc_u32_mark *mark;
1056
1057		mark = nla_data(tb[TCA_U32_MARK]);
1058		n->val = mark->val;
1059		n->mask = mark->mask;
1060	}
1061#endif
1062
1063	err = u32_set_parms(net, tp, base, n, tb, tca[TCA_RATE], ovr,
1064			    extack);
1065	if (err == 0) {
1066		struct tc_u_knode __rcu **ins;
1067		struct tc_u_knode *pins;
1068
1069		err = u32_replace_hw_knode(tp, n, flags, extack);
1070		if (err)
1071			goto errhw;
1072
1073		if (!tc_in_hw(n->flags))
1074			n->flags |= TCA_CLS_FLAGS_NOT_IN_HW;
1075
1076		ins = &ht->ht[TC_U32_HASH(handle)];
1077		for (pins = rtnl_dereference(*ins); pins;
1078		     ins = &pins->next, pins = rtnl_dereference(*ins))
1079			if (TC_U32_NODE(handle) < TC_U32_NODE(pins->handle))
1080				break;
1081
1082		RCU_INIT_POINTER(n->next, pins);
1083		rcu_assign_pointer(*ins, n);
1084		tp_c->knodes++;
1085		*arg = n;
1086		return 0;
1087	}
1088
1089errhw:
1090#ifdef CONFIG_CLS_U32_MARK
1091	free_percpu(n->pcpu_success);
1092#endif
1093
1094errout:
1095	tcf_exts_destroy(&n->exts);
1096#ifdef CONFIG_CLS_U32_PERF
1097errfree:
1098	free_percpu(n->pf);
1099#endif
1100	kfree(n);
1101erridr:
1102	idr_remove(&ht->handle_idr, handle);
1103	return err;
1104}
1105
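	/* Walk every hash table and key node of this instance for a
	 * netlink dump.  arg->skip/arg->count implement resumption, so an
	 * interrupted dump can pick up where it stopped.
	 */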
1106static void u32_walk(struct tcf_proto *tp, struct tcf_walker *arg,
1107		     bool rtnl_held)
1108{
1109	struct tc_u_common *tp_c = tp->data;
1110	struct tc_u_hnode *ht;
1111	struct tc_u_knode *n;
1112	unsigned int h;
1113
1114	if (arg->stop)
1115		return;
1116
1117	for (ht = rtnl_dereference(tp_c->hlist);
1118	     ht;
1119	     ht = rtnl_dereference(ht->next)) {
1120		if (ht->prio != tp->prio)
1121			continue;
1122		if (arg->count >= arg->skip) {
1123			if (arg->fn(tp, ht, arg) < 0) {
1124				arg->stop = 1;
1125				return;
1126			}
1127		}
1128		arg->count++;
1129		for (h = 0; h <= ht->divisor; h++) {
1130			for (n = rtnl_dereference(ht->ht[h]);
1131			     n;
1132			     n = rtnl_dereference(n->next)) {
1133				if (arg->count < arg->skip) {
1134					arg->count++;
1135					continue;
1136				}
1137				if (arg->fn(tp, n, arg) < 0) {
1138					arg->stop = 1;
1139					return;
1140				}
1141				arg->count++;
1142			}
1143		}
1144	}
1145}
1146
1147static int u32_reoffload_hnode(struct tcf_proto *tp, struct tc_u_hnode *ht,
1148			       bool add, flow_setup_cb_t *cb, void *cb_priv,
1149			       struct netlink_ext_ack *extack)
1150{
1151	struct tc_cls_u32_offload cls_u32 = {};
1152	int err;
1153
1154	tc_cls_common_offload_init(&cls_u32.common, tp, ht->flags, extack);
1155	cls_u32.command = add ? TC_CLSU32_NEW_HNODE : TC_CLSU32_DELETE_HNODE;
1156	cls_u32.hnode.divisor = ht->divisor;
1157	cls_u32.hnode.handle = ht->handle;
1158	cls_u32.hnode.prio = ht->prio;
1159
1160	err = cb(TC_SETUP_CLSU32, &cls_u32, cb_priv);
1161	if (err && add && tc_skip_sw(ht->flags))
1162		return err;
1163
1164	return 0;
1165}
1166
1167static int u32_reoffload_knode(struct tcf_proto *tp, struct tc_u_knode *n,
1168			       bool add, flow_setup_cb_t *cb, void *cb_priv,
1169			       struct netlink_ext_ack *extack)
1170{
1171	struct tc_u_hnode *ht = rtnl_dereference(n->ht_down);
1172	struct tcf_block *block = tp->chain->block;
1173	struct tc_cls_u32_offload cls_u32 = {};
1174
1175	tc_cls_common_offload_init(&cls_u32.common, tp, n->flags, extack);
1176	cls_u32.command = add ?
1177		TC_CLSU32_REPLACE_KNODE : TC_CLSU32_DELETE_KNODE;
1178	cls_u32.knode.handle = n->handle;
1179
1180	if (add) {
1181		cls_u32.knode.fshift = n->fshift;
1182#ifdef CONFIG_CLS_U32_MARK
1183		cls_u32.knode.val = n->val;
1184		cls_u32.knode.mask = n->mask;
1185#else
1186		cls_u32.knode.val = 0;
1187		cls_u32.knode.mask = 0;
1188#endif
1189		cls_u32.knode.sel = &n->sel;
1190		cls_u32.knode.res = &n->res;
1191		cls_u32.knode.exts = &n->exts;
1192		if (n->ht_down)
1193			cls_u32.knode.link_handle = ht->handle;
1194	}
1195
1196	return tc_setup_cb_reoffload(block, tp, add, cb, TC_SETUP_CLSU32,
1197				     &cls_u32, cb_priv, &n->flags,
1198				     &n->in_hw_count);
1199}
1200
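	/* Replay all offload state towards a newly added (or removed)
	 * block callback: on add, hash tables go out before their filters;
	 * on remove, filters are torn down before their tables.
	 */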
1201static int u32_reoffload(struct tcf_proto *tp, bool add, flow_setup_cb_t *cb,
1202			 void *cb_priv, struct netlink_ext_ack *extack)
1203{
1204	struct tc_u_common *tp_c = tp->data;
1205	struct tc_u_hnode *ht;
1206	struct tc_u_knode *n;
1207	unsigned int h;
1208	int err;
1209
1210	for (ht = rtnl_dereference(tp_c->hlist);
1211	     ht;
1212	     ht = rtnl_dereference(ht->next)) {
1213		if (ht->prio != tp->prio)
1214			continue;
1215
1216		/* When adding filters to a new dev, try to offload the
1217		 * hashtable first. When removing, do the filters before the
1218		 * hashtable.
1219		 */
1220		if (add && !tc_skip_hw(ht->flags)) {
1221			err = u32_reoffload_hnode(tp, ht, add, cb, cb_priv,
1222						  extack);
1223			if (err)
1224				return err;
1225		}
1226
1227		for (h = 0; h <= ht->divisor; h++) {
1228			for (n = rtnl_dereference(ht->ht[h]);
1229			     n;
1230			     n = rtnl_dereference(n->next)) {
1231				if (tc_skip_hw(n->flags))
1232					continue;
1233
1234				err = u32_reoffload_knode(tp, n, add, cb,
1235							  cb_priv, extack);
1236				if (err)
1237					return err;
1238			}
1239		}
1240
1241		if (!add && !tc_skip_hw(ht->flags))
1242			u32_reoffload_hnode(tp, ht, add, cb, cb_priv, extack);
1243	}
1244
1245	return 0;
1246}
1247
1248static void u32_bind_class(void *fh, u32 classid, unsigned long cl, void *q,
1249			   unsigned long base)
1250{
1251	struct tc_u_knode *n = fh;
1252
1253	if (n && n->res.classid == classid) {
1254		if (cl)
1255			__tcf_bind_filter(q, &n->res, base);
1256		else
1257			__tcf_unbind_filter(q, &n->res);
1258	}
1259}
1260
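	/* Dump one node to user space.  Hash tables only report their
	 * divisor; key nodes report selector, flags, links and, with
	 * CONFIG_CLS_U32_PERF, per-key hit counters summed over all CPUs.
	 */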
1261static int u32_dump(struct net *net, struct tcf_proto *tp, void *fh,
1262		    struct sk_buff *skb, struct tcmsg *t, bool rtnl_held)
1263{
1264	struct tc_u_knode *n = fh;
1265	struct tc_u_hnode *ht_up, *ht_down;
1266	struct nlattr *nest;
1267
1268	if (n == NULL)
1269		return skb->len;
1270
1271	t->tcm_handle = n->handle;
1272
1273	nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
1274	if (nest == NULL)
1275		goto nla_put_failure;
1276
1277	if (TC_U32_KEY(n->handle) == 0) {
1278		struct tc_u_hnode *ht = fh;
1279		u32 divisor = ht->divisor + 1;
1280
1281		if (nla_put_u32(skb, TCA_U32_DIVISOR, divisor))
1282			goto nla_put_failure;
1283	} else {
1284#ifdef CONFIG_CLS_U32_PERF
1285		struct tc_u32_pcnt *gpf;
1286		int cpu;
1287#endif
1288
1289		if (nla_put(skb, TCA_U32_SEL, struct_size(&n->sel, keys, n->sel.nkeys),
1290			    &n->sel))
1291			goto nla_put_failure;
1292
1293		ht_up = rtnl_dereference(n->ht_up);
1294		if (ht_up) {
1295			u32 htid = n->handle & 0xFFFFF000;
1296			if (nla_put_u32(skb, TCA_U32_HASH, htid))
1297				goto nla_put_failure;
1298		}
1299		if (n->res.classid &&
1300		    nla_put_u32(skb, TCA_U32_CLASSID, n->res.classid))
1301			goto nla_put_failure;
1302
1303		ht_down = rtnl_dereference(n->ht_down);
1304		if (ht_down &&
1305		    nla_put_u32(skb, TCA_U32_LINK, ht_down->handle))
1306			goto nla_put_failure;
1307
1308		if (n->flags && nla_put_u32(skb, TCA_U32_FLAGS, n->flags))
1309			goto nla_put_failure;
1310
1311#ifdef CONFIG_CLS_U32_MARK
1312		if ((n->val || n->mask)) {
1313			struct tc_u32_mark mark = {.val = n->val,
1314						   .mask = n->mask,
1315						   .success = 0};
1316			int cpum;
1317
1318			for_each_possible_cpu(cpum) {
1319				__u32 cnt = *per_cpu_ptr(n->pcpu_success, cpum);
1320
1321				mark.success += cnt;
1322			}
1323
1324			if (nla_put(skb, TCA_U32_MARK, sizeof(mark), &mark))
1325				goto nla_put_failure;
1326		}
1327#endif
1328
1329		if (tcf_exts_dump(skb, &n->exts) < 0)
1330			goto nla_put_failure;
1331
1332		if (n->ifindex) {
1333			struct net_device *dev;
1334			dev = __dev_get_by_index(net, n->ifindex);
1335			if (dev && nla_put_string(skb, TCA_U32_INDEV, dev->name))
1336				goto nla_put_failure;
1337		}
1338#ifdef CONFIG_CLS_U32_PERF
1339		gpf = kzalloc(struct_size(gpf, kcnts, n->sel.nkeys), GFP_KERNEL);
1340		if (!gpf)
1341			goto nla_put_failure;
1342
1343		for_each_possible_cpu(cpu) {
1344			int i;
1345			struct tc_u32_pcnt *pf = per_cpu_ptr(n->pf, cpu);
1346
1347			gpf->rcnt += pf->rcnt;
1348			gpf->rhit += pf->rhit;
1349			for (i = 0; i < n->sel.nkeys; i++)
1350				gpf->kcnts[i] += pf->kcnts[i];
1351		}
1352
1353		if (nla_put_64bit(skb, TCA_U32_PCNT, struct_size(gpf, kcnts, n->sel.nkeys),
1354				  gpf, TCA_U32_PAD)) {
1355			kfree(gpf);
1356			goto nla_put_failure;
1357		}
1358		kfree(gpf);
1359#endif
1360	}
1361
1362	nla_nest_end(skb, nest);
1363
1364	if (TC_U32_KEY(n->handle))
1365		if (tcf_exts_dump_stats(skb, &n->exts) < 0)
1366			goto nla_put_failure;
1367	return skb->len;
1368
1369nla_put_failure:
1370	nla_nest_cancel(skb, nest);
1371	return -1;
1372}
1373
1374static struct tcf_proto_ops cls_u32_ops __read_mostly = {
1375	.kind		=	"u32",
1376	.classify	=	u32_classify,
1377	.init		=	u32_init,
1378	.destroy	=	u32_destroy,
1379	.get		=	u32_get,
1380	.change		=	u32_change,
1381	.delete		=	u32_delete,
1382	.walk		=	u32_walk,
1383	.reoffload	=	u32_reoffload,
1384	.dump		=	u32_dump,
1385	.bind_class	=	u32_bind_class,
1386	.owner		=	THIS_MODULE,
1387};
1388
1389static int __init init_u32(void)
1390{
1391	int i, ret;
1392
1393	pr_info("u32 classifier\n");
1394#ifdef CONFIG_CLS_U32_PERF
1395	pr_info("    Performance counters on\n");
1396#endif
1397	pr_info("    input device check on\n");
1398#ifdef CONFIG_NET_CLS_ACT
1399	pr_info("    Actions configured\n");
1400#endif
1401	tc_u_common_hash = kvmalloc_array(U32_HASH_SIZE,
1402					  sizeof(struct hlist_head),
1403					  GFP_KERNEL);
1404	if (!tc_u_common_hash)
1405		return -ENOMEM;
1406
1407	for (i = 0; i < U32_HASH_SIZE; i++)
1408		INIT_HLIST_HEAD(&tc_u_common_hash[i]);
1409
1410	ret = register_tcf_proto_ops(&cls_u32_ops);
1411	if (ret)
1412		kvfree(tc_u_common_hash);
1413	return ret;
1414}
1415
1416static void __exit exit_u32(void)
1417{
1418	unregister_tcf_proto_ops(&cls_u32_ops);
1419	kvfree(tc_u_common_hash);
1420}
1421
1422module_init(init_u32)
1423module_exit(exit_u32)
1424MODULE_LICENSE("GPL");