v5.4
   1// SPDX-License-Identifier: GPL-2.0-or-later
   2/*
   3 * net/sched/em_meta.c	Metadata ematch
   4 *
   5 * Authors:	Thomas Graf <tgraf@suug.ch>
   6 *
   7 * ==========================================================================
   8 *
   9 * 	The metadata ematch compares two meta objects where each object
  10 * 	represents either a meta value stored in the kernel or a static
  11 * 	value provided by userspace. The objects are not supplied by
  12 * 	userspace directly; instead, userspace provides a definition with
  13 * 	the information needed to build them. Every object is of a certain
  14 * 	type which must match the type of the object it is compared to.
  15 *
  16 * 	The definition of an object consists of the type (meta type), an
  17 * 	identifier (meta id) and additional type specific information.
  18 * 	The meta id is either TCF_META_ID_VALUE for values provided by
  19 * 	userspace or an index into the meta operations table consisting of
  20 * 	function pointers to type specific meta data collectors returning
  21 * 	the requested meta value.
  22 *
  23 * 	         lvalue                                   rvalue
  24 * 	      +-----------+                           +-----------+
  25 * 	      | type: INT |                           | type: INT |
  26 * 	 def  | id: DEV   |                           | id: VALUE |
  27 * 	      | data:     |                           | data: 3   |
  28 * 	      +-----------+                           +-----------+
  29 * 	            |                                       |
  30 * 	            ---> meta_ops[INT][DEV](...)            |
  31 *	                      |                             |
  32 * 	            -----------                             |
  33 * 	            V                                       V
  34 * 	      +-----------+                           +-----------+
  35 * 	      | type: INT |                           | type: INT |
  36 * 	 obj  | id: DEV   |                           | id: VALUE |
  37 * 	      | data: 2   |<--data got filled out     | data: 3   |
  38 * 	      +-----------+                           +-----------+
  39 * 	            |                                         |
  40 * 	            --------------> 2  equals 3 <--------------
  41 *
  42 * 	This is a simplified schema; the complexity varies depending
  43 * 	on the meta type. Obviously, the length of the data must also
  44 * 	be provided for non-numeric types.
  45 *
  46 * 	Additionally, type dependent modifiers such as shift operators
  47 * 	or masks may be applied to extend the functionality. As of now,
  48 * 	the variable length type supports shifting the byte string to
  49 * 	the right, eating up any number of octets and thus supporting
  50 * 	wildcard interface name comparisons such as "ppp%" matching
  51 * 	ppp0..9.
  52 *
  53 * 	NOTE: Certain meta values depend on other subsystems and are
  54 * 	      only available if that subsystem is enabled in the kernel.
  55 */
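/*
 * 	Example (illustrative sketch): to match packets whose priority
 * 	equals 6, the lvalue definition would carry type INT and id
 * 	PRIORITY, resolved below to meta_int_priority() which reads
 * 	skb->priority, while the rvalue definition would carry type INT,
 * 	id VALUE and the constant 6 supplied by userspace; with the EQ
 * 	operand the ematch succeeds when the two collected objects are
 * 	equal. Userspace tools such as tc express this through their meta
 * 	ematch syntax (roughly "meta(priority eq 6)"); the exact syntax is
 * 	defined by the userspace tool, not by this file.
 */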
  56
  57#include <linux/slab.h>
  58#include <linux/module.h>
  59#include <linux/types.h>
  60#include <linux/kernel.h>
  61#include <linux/sched.h>
  62#include <linux/sched/loadavg.h>
  63#include <linux/string.h>
  64#include <linux/skbuff.h>
  65#include <linux/random.h>
  66#include <linux/if_vlan.h>
  67#include <linux/tc_ematch/tc_em_meta.h>
  68#include <net/dst.h>
  69#include <net/route.h>
  70#include <net/pkt_cls.h>
  71#include <net/sock.h>
  72
  73struct meta_obj {
  74	unsigned long		value;
  75	unsigned int		len;
  76};
  77
  78struct meta_value {
  79	struct tcf_meta_val	hdr;
  80	unsigned long		val;
  81	unsigned int		len;
  82};
  83
  84struct meta_match {
  85	struct meta_value	lvalue;
  86	struct meta_value	rvalue;
  87};
  88
  89static inline int meta_id(struct meta_value *v)
  90{
  91	return TCF_META_ID(v->hdr.kind);
  92}
  93
  94static inline int meta_type(struct meta_value *v)
  95{
  96	return TCF_META_TYPE(v->hdr.kind);
  97}
  98
  99#define META_COLLECTOR(FUNC) static void meta_##FUNC(struct sk_buff *skb, \
 100	struct tcf_pkt_info *info, struct meta_value *v, \
 101	struct meta_obj *dst, int *err)
 102
 103/**************************************************************************
 104 * System status & misc
 105 **************************************************************************/
 106
 107META_COLLECTOR(int_random)
 108{
 109	get_random_bytes(&dst->value, sizeof(dst->value));
 110}
 111
 112static inline unsigned long fixed_loadavg(int load)
 113{
 114	int rnd_load = load + (FIXED_1/200);
 115	int rnd_frac = ((rnd_load & (FIXED_1-1)) * 100) >> FSHIFT;
 116
 117	return ((rnd_load >> FSHIFT) * 100) + rnd_frac;
 118}
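/* Worked example (assuming FSHIFT == 11 and FIXED_1 == 2048, as defined in
 * <linux/sched/loadavg.h>): a load average of 1.50 is stored in avenrun[]
 * as 3072. Then rnd_load = 3072 + 2048/200 = 3082, rnd_frac =
 * ((3082 & 2047) * 100) >> 11 = 50, and the result is
 * (3082 >> 11) * 100 + 50 = 150, i.e. the load average scaled by 100.
 */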
 119
 120META_COLLECTOR(int_loadavg_0)
 121{
 122	dst->value = fixed_loadavg(avenrun[0]);
 123}
 124
 125META_COLLECTOR(int_loadavg_1)
 126{
 127	dst->value = fixed_loadavg(avenrun[1]);
 128}
 129
 130META_COLLECTOR(int_loadavg_2)
 131{
 132	dst->value = fixed_loadavg(avenrun[2]);
 133}
 134
 135/**************************************************************************
 136 * Device names & indices
 137 **************************************************************************/
 138
 139static inline int int_dev(struct net_device *dev, struct meta_obj *dst)
 140{
 141	if (unlikely(dev == NULL))
 142		return -1;
 143
 144	dst->value = dev->ifindex;
 145	return 0;
 146}
 147
 148static inline int var_dev(struct net_device *dev, struct meta_obj *dst)
 149{
 150	if (unlikely(dev == NULL))
 151		return -1;
 152
 153	dst->value = (unsigned long) dev->name;
 154	dst->len = strlen(dev->name);
 155	return 0;
 156}
 157
 158META_COLLECTOR(int_dev)
 159{
 160	*err = int_dev(skb->dev, dst);
 161}
 162
 163META_COLLECTOR(var_dev)
 164{
 165	*err = var_dev(skb->dev, dst);
 166}
 167
 168/**************************************************************************
 169 * vlan tag
 170 **************************************************************************/
 171
 172META_COLLECTOR(int_vlan_tag)
 173{
 174	unsigned short tag;
 175
 176	if (skb_vlan_tag_present(skb))
 177		dst->value = skb_vlan_tag_get(skb);
 178	else if (!__vlan_get_tag(skb, &tag))
 179		dst->value = tag;
 180	else
 181		*err = -1;
 182}
 183
 184
 185
 186/**************************************************************************
 187 * skb attributes
 188 **************************************************************************/
 189
 190META_COLLECTOR(int_priority)
 191{
 192	dst->value = skb->priority;
 193}
 194
 195META_COLLECTOR(int_protocol)
 196{
 197	/* Let userspace take care of the byte ordering */
 198	dst->value = tc_skb_protocol(skb);
 199}
 200
 201META_COLLECTOR(int_pkttype)
 202{
 203	dst->value = skb->pkt_type;
 204}
 205
 206META_COLLECTOR(int_pktlen)
 207{
 208	dst->value = skb->len;
 209}
 210
 211META_COLLECTOR(int_datalen)
 212{
 213	dst->value = skb->data_len;
 214}
 215
 216META_COLLECTOR(int_maclen)
 217{
 218	dst->value = skb->mac_len;
 219}
 220
 221META_COLLECTOR(int_rxhash)
 222{
 223	dst->value = skb_get_hash(skb);
 224}
 225
 226/**************************************************************************
 227 * Netfilter
 228 **************************************************************************/
 229
 230META_COLLECTOR(int_mark)
 231{
 232	dst->value = skb->mark;
 233}
 234
 235/**************************************************************************
 236 * Traffic Control
 237 **************************************************************************/
 238
 239META_COLLECTOR(int_tcindex)
 240{
 241	dst->value = skb->tc_index;
 242}
 243
 244/**************************************************************************
 245 * Routing
 246 **************************************************************************/
 247
 248META_COLLECTOR(int_rtclassid)
 249{
 250	if (unlikely(skb_dst(skb) == NULL))
 251		*err = -1;
 252	else
 253#ifdef CONFIG_IP_ROUTE_CLASSID
 254		dst->value = skb_dst(skb)->tclassid;
 255#else
 256		dst->value = 0;
 257#endif
 258}
 259
 260META_COLLECTOR(int_rtiif)
 261{
 262	if (unlikely(skb_rtable(skb) == NULL))
 263		*err = -1;
 264	else
 265		dst->value = inet_iif(skb);
 266}
 267
 268/**************************************************************************
 269 * Socket Attributes
 270 **************************************************************************/
 271
 272#define skip_nonlocal(skb) \
 273	(unlikely(skb->sk == NULL))
 274
 275META_COLLECTOR(int_sk_family)
 276{
 277	if (skip_nonlocal(skb)) {
 278		*err = -1;
 279		return;
 280	}
 281	dst->value = skb->sk->sk_family;
 282}
 283
 284META_COLLECTOR(int_sk_state)
 285{
 286	if (skip_nonlocal(skb)) {
 287		*err = -1;
 288		return;
 289	}
 290	dst->value = skb->sk->sk_state;
 291}
 292
 293META_COLLECTOR(int_sk_reuse)
 294{
 295	if (skip_nonlocal(skb)) {
 296		*err = -1;
 297		return;
 298	}
 299	dst->value = skb->sk->sk_reuse;
 300}
 301
 302META_COLLECTOR(int_sk_bound_if)
 303{
 304	if (skip_nonlocal(skb)) {
 305		*err = -1;
 306		return;
 307	}
 308	/* No error if bound_dev_if is 0, legal userspace check */
 309	dst->value = skb->sk->sk_bound_dev_if;
 310}
 311
 312META_COLLECTOR(var_sk_bound_if)
  313{
  314	if (skip_nonlocal(skb)) {
 315		*err = -1;
 316		return;
 317	}
 318
  319	if (skb->sk->sk_bound_dev_if == 0) {
  320		dst->value = (unsigned long) "any";
 321		dst->len = 3;
 322	} else {
 323		struct net_device *dev;
 324
 325		rcu_read_lock();
 326		dev = dev_get_by_index_rcu(sock_net(skb->sk),
 327					   skb->sk->sk_bound_dev_if);
 328		*err = var_dev(dev, dst);
 329		rcu_read_unlock();
 330	}
 331}
 332
 333META_COLLECTOR(int_sk_refcnt)
 334{
 335	if (skip_nonlocal(skb)) {
 336		*err = -1;
 337		return;
 338	}
 339	dst->value = refcount_read(&skb->sk->sk_refcnt);
 340}
 341
 342META_COLLECTOR(int_sk_rcvbuf)
 343{
 344	const struct sock *sk = skb_to_full_sk(skb);
 345
 346	if (!sk) {
 347		*err = -1;
 348		return;
 349	}
 350	dst->value = sk->sk_rcvbuf;
 351}
 352
 353META_COLLECTOR(int_sk_shutdown)
 354{
 355	const struct sock *sk = skb_to_full_sk(skb);
 356
 357	if (!sk) {
 358		*err = -1;
 359		return;
 360	}
 361	dst->value = sk->sk_shutdown;
 362}
 363
 364META_COLLECTOR(int_sk_proto)
 365{
 366	const struct sock *sk = skb_to_full_sk(skb);
 367
 368	if (!sk) {
 369		*err = -1;
 370		return;
 371	}
 372	dst->value = sk->sk_protocol;
 373}
 374
 375META_COLLECTOR(int_sk_type)
 376{
 377	const struct sock *sk = skb_to_full_sk(skb);
 378
 379	if (!sk) {
 380		*err = -1;
 381		return;
 382	}
 383	dst->value = sk->sk_type;
 384}
 385
 386META_COLLECTOR(int_sk_rmem_alloc)
 387{
 388	const struct sock *sk = skb_to_full_sk(skb);
 389
 390	if (!sk) {
 391		*err = -1;
 392		return;
 393	}
 394	dst->value = sk_rmem_alloc_get(sk);
 395}
 396
 397META_COLLECTOR(int_sk_wmem_alloc)
 398{
 399	const struct sock *sk = skb_to_full_sk(skb);
 400
 401	if (!sk) {
 402		*err = -1;
 403		return;
 404	}
 405	dst->value = sk_wmem_alloc_get(sk);
 406}
 407
 408META_COLLECTOR(int_sk_omem_alloc)
 409{
 410	const struct sock *sk = skb_to_full_sk(skb);
 411
 412	if (!sk) {
 413		*err = -1;
 414		return;
 415	}
 416	dst->value = atomic_read(&sk->sk_omem_alloc);
 417}
 418
 419META_COLLECTOR(int_sk_rcv_qlen)
 420{
 421	const struct sock *sk = skb_to_full_sk(skb);
 422
 423	if (!sk) {
 424		*err = -1;
 425		return;
 426	}
 427	dst->value = sk->sk_receive_queue.qlen;
 428}
 429
 430META_COLLECTOR(int_sk_snd_qlen)
 431{
 432	const struct sock *sk = skb_to_full_sk(skb);
 433
 434	if (!sk) {
 435		*err = -1;
 436		return;
 437	}
 438	dst->value = sk->sk_write_queue.qlen;
 439}
 440
 441META_COLLECTOR(int_sk_wmem_queued)
 442{
 443	const struct sock *sk = skb_to_full_sk(skb);
 444
 445	if (!sk) {
 446		*err = -1;
 447		return;
 448	}
 449	dst->value = READ_ONCE(sk->sk_wmem_queued);
 450}
 451
 452META_COLLECTOR(int_sk_fwd_alloc)
 453{
 454	const struct sock *sk = skb_to_full_sk(skb);
 455
 456	if (!sk) {
 457		*err = -1;
 458		return;
 459	}
 460	dst->value = sk->sk_forward_alloc;
 461}
 462
 463META_COLLECTOR(int_sk_sndbuf)
 464{
 465	const struct sock *sk = skb_to_full_sk(skb);
 466
 467	if (!sk) {
 468		*err = -1;
 469		return;
 470	}
 471	dst->value = sk->sk_sndbuf;
 472}
 473
 474META_COLLECTOR(int_sk_alloc)
 475{
 476	const struct sock *sk = skb_to_full_sk(skb);
 477
 478	if (!sk) {
 479		*err = -1;
 480		return;
 481	}
 482	dst->value = (__force int) sk->sk_allocation;
 483}
 484
 485META_COLLECTOR(int_sk_hash)
 486{
 487	if (skip_nonlocal(skb)) {
 488		*err = -1;
 489		return;
 490	}
 491	dst->value = skb->sk->sk_hash;
 492}
 493
 494META_COLLECTOR(int_sk_lingertime)
 495{
 496	const struct sock *sk = skb_to_full_sk(skb);
 497
 498	if (!sk) {
 499		*err = -1;
 500		return;
 501	}
 502	dst->value = sk->sk_lingertime / HZ;
 503}
 504
 505META_COLLECTOR(int_sk_err_qlen)
 506{
 507	const struct sock *sk = skb_to_full_sk(skb);
 508
 509	if (!sk) {
 510		*err = -1;
 511		return;
 512	}
 513	dst->value = sk->sk_error_queue.qlen;
 514}
 515
 516META_COLLECTOR(int_sk_ack_bl)
 517{
 518	const struct sock *sk = skb_to_full_sk(skb);
 519
 520	if (!sk) {
 521		*err = -1;
 522		return;
 523	}
 524	dst->value = sk->sk_ack_backlog;
 525}
 526
 527META_COLLECTOR(int_sk_max_ack_bl)
 528{
 529	const struct sock *sk = skb_to_full_sk(skb);
 530
 531	if (!sk) {
 532		*err = -1;
 533		return;
 534	}
 535	dst->value = sk->sk_max_ack_backlog;
 536}
 537
 538META_COLLECTOR(int_sk_prio)
 539{
 540	const struct sock *sk = skb_to_full_sk(skb);
 541
 542	if (!sk) {
 543		*err = -1;
 544		return;
 545	}
 546	dst->value = sk->sk_priority;
 547}
 548
 549META_COLLECTOR(int_sk_rcvlowat)
 550{
 551	const struct sock *sk = skb_to_full_sk(skb);
 552
 553	if (!sk) {
 554		*err = -1;
 555		return;
 556	}
 557	dst->value = READ_ONCE(sk->sk_rcvlowat);
 558}
 559
 560META_COLLECTOR(int_sk_rcvtimeo)
 561{
 562	const struct sock *sk = skb_to_full_sk(skb);
 563
 564	if (!sk) {
 565		*err = -1;
 566		return;
 567	}
 568	dst->value = sk->sk_rcvtimeo / HZ;
 569}
 570
 571META_COLLECTOR(int_sk_sndtimeo)
 572{
 573	const struct sock *sk = skb_to_full_sk(skb);
 574
 575	if (!sk) {
 576		*err = -1;
 577		return;
 578	}
 579	dst->value = sk->sk_sndtimeo / HZ;
 580}
 581
 582META_COLLECTOR(int_sk_sendmsg_off)
 583{
 584	const struct sock *sk = skb_to_full_sk(skb);
 585
 586	if (!sk) {
 587		*err = -1;
 588		return;
 589	}
 590	dst->value = sk->sk_frag.offset;
 591}
 592
 593META_COLLECTOR(int_sk_write_pend)
 594{
 595	const struct sock *sk = skb_to_full_sk(skb);
 596
 597	if (!sk) {
 598		*err = -1;
 599		return;
 600	}
 601	dst->value = sk->sk_write_pending;
 602}
 603
 604/**************************************************************************
 605 * Meta value collectors assignment table
 606 **************************************************************************/
 607
 608struct meta_ops {
 609	void		(*get)(struct sk_buff *, struct tcf_pkt_info *,
 610			       struct meta_value *, struct meta_obj *, int *);
 611};
 612
 613#define META_ID(name) TCF_META_ID_##name
 614#define META_FUNC(name) { .get = meta_##name }
 615
 616/* Meta value operations table listing all meta value collectors and
  617 * assigning them to a type and meta id. */
 618static struct meta_ops __meta_ops[TCF_META_TYPE_MAX + 1][TCF_META_ID_MAX + 1] = {
 619	[TCF_META_TYPE_VAR] = {
 620		[META_ID(DEV)]			= META_FUNC(var_dev),
 621		[META_ID(SK_BOUND_IF)] 		= META_FUNC(var_sk_bound_if),
 622	},
 623	[TCF_META_TYPE_INT] = {
 624		[META_ID(RANDOM)]		= META_FUNC(int_random),
 625		[META_ID(LOADAVG_0)]		= META_FUNC(int_loadavg_0),
 626		[META_ID(LOADAVG_1)]		= META_FUNC(int_loadavg_1),
 627		[META_ID(LOADAVG_2)]		= META_FUNC(int_loadavg_2),
 628		[META_ID(DEV)]			= META_FUNC(int_dev),
 629		[META_ID(PRIORITY)]		= META_FUNC(int_priority),
 630		[META_ID(PROTOCOL)]		= META_FUNC(int_protocol),
 631		[META_ID(PKTTYPE)]		= META_FUNC(int_pkttype),
 632		[META_ID(PKTLEN)]		= META_FUNC(int_pktlen),
 633		[META_ID(DATALEN)]		= META_FUNC(int_datalen),
 634		[META_ID(MACLEN)]		= META_FUNC(int_maclen),
 635		[META_ID(NFMARK)]		= META_FUNC(int_mark),
 636		[META_ID(TCINDEX)]		= META_FUNC(int_tcindex),
 637		[META_ID(RTCLASSID)]		= META_FUNC(int_rtclassid),
 638		[META_ID(RTIIF)]		= META_FUNC(int_rtiif),
 639		[META_ID(SK_FAMILY)]		= META_FUNC(int_sk_family),
 640		[META_ID(SK_STATE)]		= META_FUNC(int_sk_state),
 641		[META_ID(SK_REUSE)]		= META_FUNC(int_sk_reuse),
 642		[META_ID(SK_BOUND_IF)]		= META_FUNC(int_sk_bound_if),
 643		[META_ID(SK_REFCNT)]		= META_FUNC(int_sk_refcnt),
 644		[META_ID(SK_RCVBUF)]		= META_FUNC(int_sk_rcvbuf),
 645		[META_ID(SK_SNDBUF)]		= META_FUNC(int_sk_sndbuf),
 646		[META_ID(SK_SHUTDOWN)]		= META_FUNC(int_sk_shutdown),
 647		[META_ID(SK_PROTO)]		= META_FUNC(int_sk_proto),
 648		[META_ID(SK_TYPE)]		= META_FUNC(int_sk_type),
 649		[META_ID(SK_RMEM_ALLOC)]	= META_FUNC(int_sk_rmem_alloc),
 650		[META_ID(SK_WMEM_ALLOC)]	= META_FUNC(int_sk_wmem_alloc),
 651		[META_ID(SK_OMEM_ALLOC)]	= META_FUNC(int_sk_omem_alloc),
 652		[META_ID(SK_WMEM_QUEUED)]	= META_FUNC(int_sk_wmem_queued),
 653		[META_ID(SK_RCV_QLEN)]		= META_FUNC(int_sk_rcv_qlen),
 654		[META_ID(SK_SND_QLEN)]		= META_FUNC(int_sk_snd_qlen),
 655		[META_ID(SK_ERR_QLEN)]		= META_FUNC(int_sk_err_qlen),
 656		[META_ID(SK_FORWARD_ALLOCS)]	= META_FUNC(int_sk_fwd_alloc),
 657		[META_ID(SK_ALLOCS)]		= META_FUNC(int_sk_alloc),
 658		[META_ID(SK_HASH)]		= META_FUNC(int_sk_hash),
 659		[META_ID(SK_LINGERTIME)]	= META_FUNC(int_sk_lingertime),
 660		[META_ID(SK_ACK_BACKLOG)]	= META_FUNC(int_sk_ack_bl),
 661		[META_ID(SK_MAX_ACK_BACKLOG)]	= META_FUNC(int_sk_max_ack_bl),
 662		[META_ID(SK_PRIO)]		= META_FUNC(int_sk_prio),
 663		[META_ID(SK_RCVLOWAT)]		= META_FUNC(int_sk_rcvlowat),
 664		[META_ID(SK_RCVTIMEO)]		= META_FUNC(int_sk_rcvtimeo),
 665		[META_ID(SK_SNDTIMEO)]		= META_FUNC(int_sk_sndtimeo),
 666		[META_ID(SK_SENDMSG_OFF)]	= META_FUNC(int_sk_sendmsg_off),
 667		[META_ID(SK_WRITE_PENDING)]	= META_FUNC(int_sk_write_pend),
 668		[META_ID(VLAN_TAG)]		= META_FUNC(int_vlan_tag),
 669		[META_ID(RXHASH)]		= META_FUNC(int_rxhash),
 670	}
 671};
 672
 673static inline struct meta_ops *meta_ops(struct meta_value *val)
 674{
 675	return &__meta_ops[meta_type(val)][meta_id(val)];
 676}
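/* Dispatch example: for a meta value whose kind encodes type
 * TCF_META_TYPE_INT and id TCF_META_ID_DEV, meta_ops() returns
 * &__meta_ops[TCF_META_TYPE_INT][TCF_META_ID_DEV], so meta_get() below ends
 * up calling meta_int_dev(), which fills dst->value with skb->dev->ifindex.
 */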
 677
 678/**************************************************************************
 679 * Type specific operations for TCF_META_TYPE_VAR
 680 **************************************************************************/
 681
 682static int meta_var_compare(struct meta_obj *a, struct meta_obj *b)
 683{
 684	int r = a->len - b->len;
 685
 686	if (r == 0)
 687		r = memcmp((void *) a->value, (void *) b->value, a->len);
 688
 689	return r;
 690}
 691
 692static int meta_var_change(struct meta_value *dst, struct nlattr *nla)
 693{
 694	int len = nla_len(nla);
 695
 696	dst->val = (unsigned long)kmemdup(nla_data(nla), len, GFP_KERNEL);
 697	if (dst->val == 0UL)
 698		return -ENOMEM;
 699	dst->len = len;
 700	return 0;
 701}
 702
 703static void meta_var_destroy(struct meta_value *v)
 704{
 705	kfree((void *) v->val);
 706}
 707
 708static void meta_var_apply_extras(struct meta_value *v,
 709				  struct meta_obj *dst)
 710{
 711	int shift = v->hdr.shift;
 712
 713	if (shift && shift < dst->len)
 714		dst->len -= shift;
 715}
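/* Shift example for variable length values: with hdr.shift == 1, a
 * collected device name "ppp0" (len 4) is compared using only its first 3
 * octets and therefore equals the userspace constant "ppp" (len 3); this is
 * the mechanism behind the "ppp%" style wildcard mentioned in the header
 * comment.
 */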
 716
 717static int meta_var_dump(struct sk_buff *skb, struct meta_value *v, int tlv)
 718{
 719	if (v->val && v->len &&
 720	    nla_put(skb, tlv, v->len, (void *) v->val))
 721		goto nla_put_failure;
 722	return 0;
 723
 724nla_put_failure:
 725	return -1;
 726}
 727
 728/**************************************************************************
 729 * Type specific operations for TCF_META_TYPE_INT
 730 **************************************************************************/
 731
 732static int meta_int_compare(struct meta_obj *a, struct meta_obj *b)
 733{
  734	/* Let gcc optimize it; the unlikely() is not based on
  735	 * measured numbers, but jump-free code for mismatches
  736	 * seems more logical. */
 737	if (unlikely(a->value == b->value))
 738		return 0;
 739	else if (a->value < b->value)
 740		return -1;
 741	else
 742		return 1;
 743}
 744
 745static int meta_int_change(struct meta_value *dst, struct nlattr *nla)
 746{
 747	if (nla_len(nla) >= sizeof(unsigned long)) {
 748		dst->val = *(unsigned long *) nla_data(nla);
 749		dst->len = sizeof(unsigned long);
 750	} else if (nla_len(nla) == sizeof(u32)) {
 751		dst->val = nla_get_u32(nla);
 752		dst->len = sizeof(u32);
 753	} else
 754		return -EINVAL;
 755
 756	return 0;
 757}
 758
 759static void meta_int_apply_extras(struct meta_value *v,
 760				  struct meta_obj *dst)
 761{
 762	if (v->hdr.shift)
 763		dst->value >>= v->hdr.shift;
 764
 765	if (v->val)
 766		dst->value &= v->val;
 767}
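/* Modifier example for integers: with hdr.shift == 8 and a mask of 0xff in
 * v->val, a collected value of 0x1234 becomes (0x1234 >> 8) & 0xff == 0x12
 * before the comparison. The mask only takes effect for kernel collected
 * values; for TCF_META_ID_VALUE meta_get() returns the constant before
 * apply_extras is reached.
 */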
 768
 769static int meta_int_dump(struct sk_buff *skb, struct meta_value *v, int tlv)
 770{
 771	if (v->len == sizeof(unsigned long)) {
 772		if (nla_put(skb, tlv, sizeof(unsigned long), &v->val))
 773			goto nla_put_failure;
 774	} else if (v->len == sizeof(u32)) {
 775		if (nla_put_u32(skb, tlv, v->val))
 776			goto nla_put_failure;
 777	}
 778
 779	return 0;
 780
 781nla_put_failure:
 782	return -1;
 783}
 784
 785/**************************************************************************
 786 * Type specific operations table
 787 **************************************************************************/
 788
 789struct meta_type_ops {
 790	void	(*destroy)(struct meta_value *);
 791	int	(*compare)(struct meta_obj *, struct meta_obj *);
 792	int	(*change)(struct meta_value *, struct nlattr *);
 793	void	(*apply_extras)(struct meta_value *, struct meta_obj *);
 794	int	(*dump)(struct sk_buff *, struct meta_value *, int);
 795};
 796
 797static const struct meta_type_ops __meta_type_ops[TCF_META_TYPE_MAX + 1] = {
 798	[TCF_META_TYPE_VAR] = {
 799		.destroy = meta_var_destroy,
 800		.compare = meta_var_compare,
 801		.change = meta_var_change,
 802		.apply_extras = meta_var_apply_extras,
 803		.dump = meta_var_dump
 804	},
 805	[TCF_META_TYPE_INT] = {
 806		.compare = meta_int_compare,
 807		.change = meta_int_change,
 808		.apply_extras = meta_int_apply_extras,
 809		.dump = meta_int_dump
 810	}
 811};
 812
 813static inline const struct meta_type_ops *meta_type_ops(struct meta_value *v)
 814{
 815	return &__meta_type_ops[meta_type(v)];
 816}
 817
 818/**************************************************************************
 819 * Core
 820 **************************************************************************/
 821
 822static int meta_get(struct sk_buff *skb, struct tcf_pkt_info *info,
 823		    struct meta_value *v, struct meta_obj *dst)
 824{
 825	int err = 0;
 826
 827	if (meta_id(v) == TCF_META_ID_VALUE) {
 828		dst->value = v->val;
 829		dst->len = v->len;
 830		return 0;
 831	}
 832
 833	meta_ops(v)->get(skb, info, v, dst, &err);
 834	if (err < 0)
 835		return err;
 836
 837	if (meta_type_ops(v)->apply_extras)
 838		meta_type_ops(v)->apply_extras(v, dst);
 839
 840	return 0;
 841}
 842
 843static int em_meta_match(struct sk_buff *skb, struct tcf_ematch *m,
 844			 struct tcf_pkt_info *info)
 845{
 846	int r;
 847	struct meta_match *meta = (struct meta_match *) m->data;
 848	struct meta_obj l_value, r_value;
 849
 850	if (meta_get(skb, info, &meta->lvalue, &l_value) < 0 ||
 851	    meta_get(skb, info, &meta->rvalue, &r_value) < 0)
 852		return 0;
 853
 854	r = meta_type_ops(&meta->lvalue)->compare(&l_value, &r_value);
 855
 856	switch (meta->lvalue.hdr.op) {
 857	case TCF_EM_OPND_EQ:
 858		return !r;
 859	case TCF_EM_OPND_LT:
 860		return r < 0;
 861	case TCF_EM_OPND_GT:
 862		return r > 0;
 863	}
 864
 865	return 0;
 866}
 867
 868static void meta_delete(struct meta_match *meta)
 869{
 870	if (meta) {
 871		const struct meta_type_ops *ops = meta_type_ops(&meta->lvalue);
 872
 873		if (ops && ops->destroy) {
 874			ops->destroy(&meta->lvalue);
 875			ops->destroy(&meta->rvalue);
 876		}
 877	}
 878
 879	kfree(meta);
 880}
 881
 882static inline int meta_change_data(struct meta_value *dst, struct nlattr *nla)
 883{
 884	if (nla) {
 885		if (nla_len(nla) == 0)
 886			return -EINVAL;
 887
 888		return meta_type_ops(dst)->change(dst, nla);
 889	}
 890
 891	return 0;
 892}
 893
 894static inline int meta_is_supported(struct meta_value *val)
 895{
 896	return !meta_id(val) || meta_ops(val)->get;
 897}
 898
 899static const struct nla_policy meta_policy[TCA_EM_META_MAX + 1] = {
 900	[TCA_EM_META_HDR]	= { .len = sizeof(struct tcf_meta_hdr) },
 901};
 902
 903static int em_meta_change(struct net *net, void *data, int len,
 904			  struct tcf_ematch *m)
 905{
 906	int err;
 907	struct nlattr *tb[TCA_EM_META_MAX + 1];
 908	struct tcf_meta_hdr *hdr;
 909	struct meta_match *meta = NULL;
 910
 911	err = nla_parse_deprecated(tb, TCA_EM_META_MAX, data, len,
 912				   meta_policy, NULL);
 913	if (err < 0)
 914		goto errout;
 915
 916	err = -EINVAL;
 917	if (tb[TCA_EM_META_HDR] == NULL)
 918		goto errout;
 919	hdr = nla_data(tb[TCA_EM_META_HDR]);
 920
 921	if (TCF_META_TYPE(hdr->left.kind) != TCF_META_TYPE(hdr->right.kind) ||
 922	    TCF_META_TYPE(hdr->left.kind) > TCF_META_TYPE_MAX ||
 923	    TCF_META_ID(hdr->left.kind) > TCF_META_ID_MAX ||
 924	    TCF_META_ID(hdr->right.kind) > TCF_META_ID_MAX)
 925		goto errout;
 926
 927	meta = kzalloc(sizeof(*meta), GFP_KERNEL);
 928	if (meta == NULL) {
 929		err = -ENOMEM;
 930		goto errout;
 931	}
 932
 933	memcpy(&meta->lvalue.hdr, &hdr->left, sizeof(hdr->left));
 934	memcpy(&meta->rvalue.hdr, &hdr->right, sizeof(hdr->right));
 935
 936	if (!meta_is_supported(&meta->lvalue) ||
 937	    !meta_is_supported(&meta->rvalue)) {
 938		err = -EOPNOTSUPP;
 939		goto errout;
 940	}
 941
 942	if (meta_change_data(&meta->lvalue, tb[TCA_EM_META_LVALUE]) < 0 ||
 943	    meta_change_data(&meta->rvalue, tb[TCA_EM_META_RVALUE]) < 0)
 944		goto errout;
 945
 946	m->datalen = sizeof(*meta);
 947	m->data = (unsigned long) meta;
 948
 949	err = 0;
 950errout:
 951	if (err && meta)
 952		meta_delete(meta);
 953	return err;
 954}
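/* Sketch of the configuration blob parsed by em_meta_change() above:
 *
 *	TCA_EM_META_HDR		struct tcf_meta_hdr { left, right }; each
 *				tcf_meta_val carries kind (meta type + id),
 *				shift and op
 *	TCA_EM_META_LVALUE	optional payload for the left value, handed to
 *				the type's ->change() handler (constant or
 *				mask depending on the meta id)
 *	TCA_EM_META_RVALUE	optional payload for the right value, handled
 *				the same way
 */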
 955
 956static void em_meta_destroy(struct tcf_ematch *m)
 957{
 958	if (m)
 959		meta_delete((struct meta_match *) m->data);
 960}
 961
 962static int em_meta_dump(struct sk_buff *skb, struct tcf_ematch *em)
 963{
 964	struct meta_match *meta = (struct meta_match *) em->data;
 965	struct tcf_meta_hdr hdr;
 966	const struct meta_type_ops *ops;
 967
 968	memset(&hdr, 0, sizeof(hdr));
 969	memcpy(&hdr.left, &meta->lvalue.hdr, sizeof(hdr.left));
 970	memcpy(&hdr.right, &meta->rvalue.hdr, sizeof(hdr.right));
 971
 972	if (nla_put(skb, TCA_EM_META_HDR, sizeof(hdr), &hdr))
 973		goto nla_put_failure;
 974
 975	ops = meta_type_ops(&meta->lvalue);
 976	if (ops->dump(skb, &meta->lvalue, TCA_EM_META_LVALUE) < 0 ||
 977	    ops->dump(skb, &meta->rvalue, TCA_EM_META_RVALUE) < 0)
 978		goto nla_put_failure;
 979
 980	return 0;
 981
 982nla_put_failure:
 983	return -1;
 984}
 985
 986static struct tcf_ematch_ops em_meta_ops = {
 987	.kind	  = TCF_EM_META,
 988	.change	  = em_meta_change,
 989	.match	  = em_meta_match,
 990	.destroy  = em_meta_destroy,
 991	.dump	  = em_meta_dump,
 992	.owner	  = THIS_MODULE,
 993	.link	  = LIST_HEAD_INIT(em_meta_ops.link)
 994};
 995
 996static int __init init_em_meta(void)
 997{
 998	return tcf_em_register(&em_meta_ops);
 999}
1000
1001static void __exit exit_em_meta(void)
1002{
1003	tcf_em_unregister(&em_meta_ops);
1004}
1005
1006MODULE_LICENSE("GPL");
1007
1008module_init(init_em_meta);
1009module_exit(exit_em_meta);
1010
1011MODULE_ALIAS_TCF_EMATCH(TCF_EM_META);
v6.8
   1// SPDX-License-Identifier: GPL-2.0-or-later
   2/*
   3 * net/sched/em_meta.c	Metadata ematch
   4 *
   5 * Authors:	Thomas Graf <tgraf@suug.ch>
   6 *
   7 * ==========================================================================
   8 *
   9 * 	The metadata ematch compares two meta objects where each object
  10 * 	represents either a meta value stored in the kernel or a static
  11 * 	value provided by userspace. The objects are not supplied by
  12 * 	userspace directly; instead, userspace provides a definition with
  13 * 	the information needed to build them. Every object is of a certain
  14 * 	type which must match the type of the object it is compared to.
  15 *
  16 * 	The definition of an object consists of the type (meta type), an
  17 * 	identifier (meta id) and additional type specific information.
  18 * 	The meta id is either TCF_META_ID_VALUE for values provided by
  19 * 	userspace or an index into the meta operations table consisting of
  20 * 	function pointers to type specific meta data collectors returning
  21 * 	the requested meta value.
  22 *
  23 * 	         lvalue                                   rvalue
  24 * 	      +-----------+                           +-----------+
  25 * 	      | type: INT |                           | type: INT |
  26 * 	 def  | id: DEV   |                           | id: VALUE |
  27 * 	      | data:     |                           | data: 3   |
  28 * 	      +-----------+                           +-----------+
  29 * 	            |                                       |
  30 * 	            ---> meta_ops[INT][DEV](...)            |
  31 *	                      |                             |
  32 * 	            -----------                             |
  33 * 	            V                                       V
  34 * 	      +-----------+                           +-----------+
  35 * 	      | type: INT |                           | type: INT |
  36 * 	 obj  | id: DEV   |                           | id: VALUE |
  37 * 	      | data: 2   |<--data got filled out     | data: 3   |
  38 * 	      +-----------+                           +-----------+
  39 * 	            |                                         |
  40 * 	            --------------> 2  equals 3 <--------------
  41 *
  42 * 	This is a simplified schema; the complexity varies depending
  43 * 	on the meta type. Obviously, the length of the data must also
  44 * 	be provided for non-numeric types.
  45 *
  46 * 	Additionally, type dependent modifiers such as shift operators
  47 * 	or masks may be applied to extend the functionality. As of now,
  48 * 	the variable length type supports shifting the byte string to
  49 * 	the right, eating up any number of octets and thus supporting
  50 * 	wildcard interface name comparisons such as "ppp%" matching
  51 * 	ppp0..9.
  52 *
  53 * 	NOTE: Certain meta values depend on other subsystems and are
  54 * 	      only available if that subsystem is enabled in the kernel.
  55 */
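/*
 * 	Example matching the diagram above: the left definition with type
 * 	INT and id DEV is resolved via __meta_ops[INT][DEV], i.e.
 * 	meta_int_dev(), which fills the object with skb->dev->ifindex (2 in
 * 	the diagram); the right definition with id VALUE simply carries the
 * 	constant 3 supplied by userspace, and the EQ/LT/GT operand stored in
 * 	the left value decides how em_meta_match() compares the two objects.
 */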
  56
  57#include <linux/slab.h>
  58#include <linux/module.h>
  59#include <linux/types.h>
  60#include <linux/kernel.h>
  61#include <linux/sched.h>
  62#include <linux/sched/loadavg.h>
  63#include <linux/string.h>
  64#include <linux/skbuff.h>
  65#include <linux/random.h>
  66#include <linux/if_vlan.h>
  67#include <linux/tc_ematch/tc_em_meta.h>
  68#include <net/dst.h>
  69#include <net/route.h>
  70#include <net/pkt_cls.h>
  71#include <net/sock.h>
  72
  73struct meta_obj {
  74	unsigned long		value;
  75	unsigned int		len;
  76};
  77
  78struct meta_value {
  79	struct tcf_meta_val	hdr;
  80	unsigned long		val;
  81	unsigned int		len;
  82};
  83
  84struct meta_match {
  85	struct meta_value	lvalue;
  86	struct meta_value	rvalue;
  87};
  88
  89static inline int meta_id(struct meta_value *v)
  90{
  91	return TCF_META_ID(v->hdr.kind);
  92}
  93
  94static inline int meta_type(struct meta_value *v)
  95{
  96	return TCF_META_TYPE(v->hdr.kind);
  97}
  98
  99#define META_COLLECTOR(FUNC) static void meta_##FUNC(struct sk_buff *skb, \
 100	struct tcf_pkt_info *info, struct meta_value *v, \
 101	struct meta_obj *dst, int *err)
 102
 103/**************************************************************************
 104 * System status & misc
 105 **************************************************************************/
 106
 107META_COLLECTOR(int_random)
 108{
 109	get_random_bytes(&dst->value, sizeof(dst->value));
 110}
 111
 112static inline unsigned long fixed_loadavg(int load)
 113{
 114	int rnd_load = load + (FIXED_1/200);
 115	int rnd_frac = ((rnd_load & (FIXED_1-1)) * 100) >> FSHIFT;
 116
 117	return ((rnd_load >> FSHIFT) * 100) + rnd_frac;
 118}
 119
 120META_COLLECTOR(int_loadavg_0)
 121{
 122	dst->value = fixed_loadavg(avenrun[0]);
 123}
 124
 125META_COLLECTOR(int_loadavg_1)
 126{
 127	dst->value = fixed_loadavg(avenrun[1]);
 128}
 129
 130META_COLLECTOR(int_loadavg_2)
 131{
 132	dst->value = fixed_loadavg(avenrun[2]);
 133}
 134
 135/**************************************************************************
 136 * Device names & indices
 137 **************************************************************************/
 138
 139static inline int int_dev(struct net_device *dev, struct meta_obj *dst)
 140{
 141	if (unlikely(dev == NULL))
 142		return -1;
 143
 144	dst->value = dev->ifindex;
 145	return 0;
 146}
 147
 148static inline int var_dev(struct net_device *dev, struct meta_obj *dst)
 149{
 150	if (unlikely(dev == NULL))
 151		return -1;
 152
 153	dst->value = (unsigned long) dev->name;
 154	dst->len = strlen(dev->name);
 155	return 0;
 156}
 157
 158META_COLLECTOR(int_dev)
 159{
 160	*err = int_dev(skb->dev, dst);
 161}
 162
 163META_COLLECTOR(var_dev)
 164{
 165	*err = var_dev(skb->dev, dst);
 166}
 167
 168/**************************************************************************
 169 * vlan tag
 170 **************************************************************************/
 171
 172META_COLLECTOR(int_vlan_tag)
 173{
 174	unsigned short tag;
 175
 176	if (skb_vlan_tag_present(skb))
 177		dst->value = skb_vlan_tag_get(skb);
 178	else if (!__vlan_get_tag(skb, &tag))
 179		dst->value = tag;
 180	else
 181		*err = -1;
 182}
 183
 184
 185
 186/**************************************************************************
 187 * skb attributes
 188 **************************************************************************/
 189
 190META_COLLECTOR(int_priority)
 191{
 192	dst->value = skb->priority;
 193}
 194
 195META_COLLECTOR(int_protocol)
 196{
 197	/* Let userspace take care of the byte ordering */
 198	dst->value = skb_protocol(skb, false);
 199}
 200
 201META_COLLECTOR(int_pkttype)
 202{
 203	dst->value = skb->pkt_type;
 204}
 205
 206META_COLLECTOR(int_pktlen)
 207{
 208	dst->value = skb->len;
 209}
 210
 211META_COLLECTOR(int_datalen)
 212{
 213	dst->value = skb->data_len;
 214}
 215
 216META_COLLECTOR(int_maclen)
 217{
 218	dst->value = skb->mac_len;
 219}
 220
 221META_COLLECTOR(int_rxhash)
 222{
 223	dst->value = skb_get_hash(skb);
 224}
 225
 226/**************************************************************************
 227 * Netfilter
 228 **************************************************************************/
 229
 230META_COLLECTOR(int_mark)
 231{
 232	dst->value = skb->mark;
 233}
 234
 235/**************************************************************************
 236 * Traffic Control
 237 **************************************************************************/
 238
 239META_COLLECTOR(int_tcindex)
 240{
 241	dst->value = skb->tc_index;
 242}
 243
 244/**************************************************************************
 245 * Routing
 246 **************************************************************************/
 247
 248META_COLLECTOR(int_rtclassid)
 249{
 250	if (unlikely(skb_dst(skb) == NULL))
 251		*err = -1;
 252	else
 253#ifdef CONFIG_IP_ROUTE_CLASSID
 254		dst->value = skb_dst(skb)->tclassid;
 255#else
 256		dst->value = 0;
 257#endif
 258}
 259
 260META_COLLECTOR(int_rtiif)
 261{
 262	if (unlikely(skb_rtable(skb) == NULL))
 263		*err = -1;
 264	else
 265		dst->value = inet_iif(skb);
 266}
 267
 268/**************************************************************************
 269 * Socket Attributes
 270 **************************************************************************/
 271
 272#define skip_nonlocal(skb) \
 273	(unlikely(skb->sk == NULL))
 274
 275META_COLLECTOR(int_sk_family)
 276{
 277	if (skip_nonlocal(skb)) {
 278		*err = -1;
 279		return;
 280	}
 281	dst->value = skb->sk->sk_family;
 282}
 283
 284META_COLLECTOR(int_sk_state)
 285{
 286	if (skip_nonlocal(skb)) {
 287		*err = -1;
 288		return;
 289	}
 290	dst->value = skb->sk->sk_state;
 291}
 292
 293META_COLLECTOR(int_sk_reuse)
 294{
 295	if (skip_nonlocal(skb)) {
 296		*err = -1;
 297		return;
 298	}
 299	dst->value = skb->sk->sk_reuse;
 300}
 301
 302META_COLLECTOR(int_sk_bound_if)
 303{
 304	if (skip_nonlocal(skb)) {
 305		*err = -1;
 306		return;
 307	}
 308	/* No error if bound_dev_if is 0, legal userspace check */
 309	dst->value = skb->sk->sk_bound_dev_if;
 310}
 311
 312META_COLLECTOR(var_sk_bound_if)
 313{
 314	int bound_dev_if;
 315
 316	if (skip_nonlocal(skb)) {
 317		*err = -1;
 318		return;
 319	}
 320
 321	bound_dev_if = READ_ONCE(skb->sk->sk_bound_dev_if);
 322	if (bound_dev_if == 0) {
 323		dst->value = (unsigned long) "any";
 324		dst->len = 3;
 325	} else {
 326		struct net_device *dev;
 327
 328		rcu_read_lock();
 329		dev = dev_get_by_index_rcu(sock_net(skb->sk),
 330					   bound_dev_if);
 331		*err = var_dev(dev, dst);
 332		rcu_read_unlock();
 333	}
 334}
 335
 336META_COLLECTOR(int_sk_refcnt)
 337{
 338	if (skip_nonlocal(skb)) {
 339		*err = -1;
 340		return;
 341	}
 342	dst->value = refcount_read(&skb->sk->sk_refcnt);
 343}
 344
 345META_COLLECTOR(int_sk_rcvbuf)
 346{
 347	const struct sock *sk = skb_to_full_sk(skb);
 348
 349	if (!sk) {
 350		*err = -1;
 351		return;
 352	}
 353	dst->value = sk->sk_rcvbuf;
 354}
 355
 356META_COLLECTOR(int_sk_shutdown)
 357{
 358	const struct sock *sk = skb_to_full_sk(skb);
 359
 360	if (!sk) {
 361		*err = -1;
 362		return;
 363	}
 364	dst->value = sk->sk_shutdown;
 365}
 366
 367META_COLLECTOR(int_sk_proto)
 368{
 369	const struct sock *sk = skb_to_full_sk(skb);
 370
 371	if (!sk) {
 372		*err = -1;
 373		return;
 374	}
 375	dst->value = sk->sk_protocol;
 376}
 377
 378META_COLLECTOR(int_sk_type)
 379{
 380	const struct sock *sk = skb_to_full_sk(skb);
 381
 382	if (!sk) {
 383		*err = -1;
 384		return;
 385	}
 386	dst->value = sk->sk_type;
 387}
 388
 389META_COLLECTOR(int_sk_rmem_alloc)
 390{
 391	const struct sock *sk = skb_to_full_sk(skb);
 392
 393	if (!sk) {
 394		*err = -1;
 395		return;
 396	}
 397	dst->value = sk_rmem_alloc_get(sk);
 398}
 399
 400META_COLLECTOR(int_sk_wmem_alloc)
 401{
 402	const struct sock *sk = skb_to_full_sk(skb);
 403
 404	if (!sk) {
 405		*err = -1;
 406		return;
 407	}
 408	dst->value = sk_wmem_alloc_get(sk);
 409}
 410
 411META_COLLECTOR(int_sk_omem_alloc)
 412{
 413	const struct sock *sk = skb_to_full_sk(skb);
 414
 415	if (!sk) {
 416		*err = -1;
 417		return;
 418	}
 419	dst->value = atomic_read(&sk->sk_omem_alloc);
 420}
 421
 422META_COLLECTOR(int_sk_rcv_qlen)
 423{
 424	const struct sock *sk = skb_to_full_sk(skb);
 425
 426	if (!sk) {
 427		*err = -1;
 428		return;
 429	}
 430	dst->value = sk->sk_receive_queue.qlen;
 431}
 432
 433META_COLLECTOR(int_sk_snd_qlen)
 434{
 435	const struct sock *sk = skb_to_full_sk(skb);
 436
 437	if (!sk) {
 438		*err = -1;
 439		return;
 440	}
 441	dst->value = sk->sk_write_queue.qlen;
 442}
 443
 444META_COLLECTOR(int_sk_wmem_queued)
 445{
 446	const struct sock *sk = skb_to_full_sk(skb);
 447
 448	if (!sk) {
 449		*err = -1;
 450		return;
 451	}
 452	dst->value = READ_ONCE(sk->sk_wmem_queued);
 453}
 454
 455META_COLLECTOR(int_sk_fwd_alloc)
 456{
 457	const struct sock *sk = skb_to_full_sk(skb);
 458
 459	if (!sk) {
 460		*err = -1;
 461		return;
 462	}
 463	dst->value = sk_forward_alloc_get(sk);
 464}
 465
 466META_COLLECTOR(int_sk_sndbuf)
 467{
 468	const struct sock *sk = skb_to_full_sk(skb);
 469
 470	if (!sk) {
 471		*err = -1;
 472		return;
 473	}
 474	dst->value = sk->sk_sndbuf;
 475}
 476
 477META_COLLECTOR(int_sk_alloc)
 478{
 479	const struct sock *sk = skb_to_full_sk(skb);
 480
 481	if (!sk) {
 482		*err = -1;
 483		return;
 484	}
 485	dst->value = (__force int) sk->sk_allocation;
 486}
 487
 488META_COLLECTOR(int_sk_hash)
 489{
 490	if (skip_nonlocal(skb)) {
 491		*err = -1;
 492		return;
 493	}
 494	dst->value = skb->sk->sk_hash;
 495}
 496
 497META_COLLECTOR(int_sk_lingertime)
 498{
 499	const struct sock *sk = skb_to_full_sk(skb);
 500
 501	if (!sk) {
 502		*err = -1;
 503		return;
 504	}
 505	dst->value = READ_ONCE(sk->sk_lingertime) / HZ;
 506}
 507
 508META_COLLECTOR(int_sk_err_qlen)
 509{
 510	const struct sock *sk = skb_to_full_sk(skb);
 511
 512	if (!sk) {
 513		*err = -1;
 514		return;
 515	}
 516	dst->value = sk->sk_error_queue.qlen;
 517}
 518
 519META_COLLECTOR(int_sk_ack_bl)
 520{
 521	const struct sock *sk = skb_to_full_sk(skb);
 522
 523	if (!sk) {
 524		*err = -1;
 525		return;
 526	}
 527	dst->value = READ_ONCE(sk->sk_ack_backlog);
 528}
 529
 530META_COLLECTOR(int_sk_max_ack_bl)
 531{
 532	const struct sock *sk = skb_to_full_sk(skb);
 533
 534	if (!sk) {
 535		*err = -1;
 536		return;
 537	}
 538	dst->value = READ_ONCE(sk->sk_max_ack_backlog);
 539}
 540
 541META_COLLECTOR(int_sk_prio)
 542{
 543	const struct sock *sk = skb_to_full_sk(skb);
 544
 545	if (!sk) {
 546		*err = -1;
 547		return;
 548	}
 549	dst->value = READ_ONCE(sk->sk_priority);
 550}
 551
 552META_COLLECTOR(int_sk_rcvlowat)
 553{
 554	const struct sock *sk = skb_to_full_sk(skb);
 555
 556	if (!sk) {
 557		*err = -1;
 558		return;
 559	}
 560	dst->value = READ_ONCE(sk->sk_rcvlowat);
 561}
 562
 563META_COLLECTOR(int_sk_rcvtimeo)
 564{
 565	const struct sock *sk = skb_to_full_sk(skb);
 566
 567	if (!sk) {
 568		*err = -1;
 569		return;
 570	}
 571	dst->value = READ_ONCE(sk->sk_rcvtimeo) / HZ;
 572}
 573
 574META_COLLECTOR(int_sk_sndtimeo)
 575{
 576	const struct sock *sk = skb_to_full_sk(skb);
 577
 578	if (!sk) {
 579		*err = -1;
 580		return;
 581	}
 582	dst->value = READ_ONCE(sk->sk_sndtimeo) / HZ;
 583}
 584
 585META_COLLECTOR(int_sk_sendmsg_off)
 586{
 587	const struct sock *sk = skb_to_full_sk(skb);
 588
 589	if (!sk) {
 590		*err = -1;
 591		return;
 592	}
 593	dst->value = sk->sk_frag.offset;
 594}
 595
 596META_COLLECTOR(int_sk_write_pend)
 597{
 598	const struct sock *sk = skb_to_full_sk(skb);
 599
 600	if (!sk) {
 601		*err = -1;
 602		return;
 603	}
 604	dst->value = sk->sk_write_pending;
 605}
 606
 607/**************************************************************************
 608 * Meta value collectors assignment table
 609 **************************************************************************/
 610
 611struct meta_ops {
 612	void		(*get)(struct sk_buff *, struct tcf_pkt_info *,
 613			       struct meta_value *, struct meta_obj *, int *);
 614};
 615
 616#define META_ID(name) TCF_META_ID_##name
 617#define META_FUNC(name) { .get = meta_##name }
 618
 619/* Meta value operations table listing all meta value collectors and
  620 * assigning them to a type and meta id. */
 621static struct meta_ops __meta_ops[TCF_META_TYPE_MAX + 1][TCF_META_ID_MAX + 1] = {
 622	[TCF_META_TYPE_VAR] = {
 623		[META_ID(DEV)]			= META_FUNC(var_dev),
 624		[META_ID(SK_BOUND_IF)] 		= META_FUNC(var_sk_bound_if),
 625	},
 626	[TCF_META_TYPE_INT] = {
 627		[META_ID(RANDOM)]		= META_FUNC(int_random),
 628		[META_ID(LOADAVG_0)]		= META_FUNC(int_loadavg_0),
 629		[META_ID(LOADAVG_1)]		= META_FUNC(int_loadavg_1),
 630		[META_ID(LOADAVG_2)]		= META_FUNC(int_loadavg_2),
 631		[META_ID(DEV)]			= META_FUNC(int_dev),
 632		[META_ID(PRIORITY)]		= META_FUNC(int_priority),
 633		[META_ID(PROTOCOL)]		= META_FUNC(int_protocol),
 634		[META_ID(PKTTYPE)]		= META_FUNC(int_pkttype),
 635		[META_ID(PKTLEN)]		= META_FUNC(int_pktlen),
 636		[META_ID(DATALEN)]		= META_FUNC(int_datalen),
 637		[META_ID(MACLEN)]		= META_FUNC(int_maclen),
 638		[META_ID(NFMARK)]		= META_FUNC(int_mark),
 639		[META_ID(TCINDEX)]		= META_FUNC(int_tcindex),
 640		[META_ID(RTCLASSID)]		= META_FUNC(int_rtclassid),
 641		[META_ID(RTIIF)]		= META_FUNC(int_rtiif),
 642		[META_ID(SK_FAMILY)]		= META_FUNC(int_sk_family),
 643		[META_ID(SK_STATE)]		= META_FUNC(int_sk_state),
 644		[META_ID(SK_REUSE)]		= META_FUNC(int_sk_reuse),
 645		[META_ID(SK_BOUND_IF)]		= META_FUNC(int_sk_bound_if),
 646		[META_ID(SK_REFCNT)]		= META_FUNC(int_sk_refcnt),
 647		[META_ID(SK_RCVBUF)]		= META_FUNC(int_sk_rcvbuf),
 648		[META_ID(SK_SNDBUF)]		= META_FUNC(int_sk_sndbuf),
 649		[META_ID(SK_SHUTDOWN)]		= META_FUNC(int_sk_shutdown),
 650		[META_ID(SK_PROTO)]		= META_FUNC(int_sk_proto),
 651		[META_ID(SK_TYPE)]		= META_FUNC(int_sk_type),
 652		[META_ID(SK_RMEM_ALLOC)]	= META_FUNC(int_sk_rmem_alloc),
 653		[META_ID(SK_WMEM_ALLOC)]	= META_FUNC(int_sk_wmem_alloc),
 654		[META_ID(SK_OMEM_ALLOC)]	= META_FUNC(int_sk_omem_alloc),
 655		[META_ID(SK_WMEM_QUEUED)]	= META_FUNC(int_sk_wmem_queued),
 656		[META_ID(SK_RCV_QLEN)]		= META_FUNC(int_sk_rcv_qlen),
 657		[META_ID(SK_SND_QLEN)]		= META_FUNC(int_sk_snd_qlen),
 658		[META_ID(SK_ERR_QLEN)]		= META_FUNC(int_sk_err_qlen),
 659		[META_ID(SK_FORWARD_ALLOCS)]	= META_FUNC(int_sk_fwd_alloc),
 660		[META_ID(SK_ALLOCS)]		= META_FUNC(int_sk_alloc),
 661		[META_ID(SK_HASH)]		= META_FUNC(int_sk_hash),
 662		[META_ID(SK_LINGERTIME)]	= META_FUNC(int_sk_lingertime),
 663		[META_ID(SK_ACK_BACKLOG)]	= META_FUNC(int_sk_ack_bl),
 664		[META_ID(SK_MAX_ACK_BACKLOG)]	= META_FUNC(int_sk_max_ack_bl),
 665		[META_ID(SK_PRIO)]		= META_FUNC(int_sk_prio),
 666		[META_ID(SK_RCVLOWAT)]		= META_FUNC(int_sk_rcvlowat),
 667		[META_ID(SK_RCVTIMEO)]		= META_FUNC(int_sk_rcvtimeo),
 668		[META_ID(SK_SNDTIMEO)]		= META_FUNC(int_sk_sndtimeo),
 669		[META_ID(SK_SENDMSG_OFF)]	= META_FUNC(int_sk_sendmsg_off),
 670		[META_ID(SK_WRITE_PENDING)]	= META_FUNC(int_sk_write_pend),
 671		[META_ID(VLAN_TAG)]		= META_FUNC(int_vlan_tag),
 672		[META_ID(RXHASH)]		= META_FUNC(int_rxhash),
 673	}
 674};
 675
 676static inline struct meta_ops *meta_ops(struct meta_value *val)
 677{
 678	return &__meta_ops[meta_type(val)][meta_id(val)];
 679}
 680
 681/**************************************************************************
 682 * Type specific operations for TCF_META_TYPE_VAR
 683 **************************************************************************/
 684
 685static int meta_var_compare(struct meta_obj *a, struct meta_obj *b)
 686{
 687	int r = a->len - b->len;
 688
 689	if (r == 0)
 690		r = memcmp((void *) a->value, (void *) b->value, a->len);
 691
 692	return r;
 693}
 694
 695static int meta_var_change(struct meta_value *dst, struct nlattr *nla)
 696{
 697	int len = nla_len(nla);
 698
 699	dst->val = (unsigned long)kmemdup(nla_data(nla), len, GFP_KERNEL);
 700	if (dst->val == 0UL)
 701		return -ENOMEM;
 702	dst->len = len;
 703	return 0;
 704}
 705
 706static void meta_var_destroy(struct meta_value *v)
 707{
 708	kfree((void *) v->val);
 709}
 710
 711static void meta_var_apply_extras(struct meta_value *v,
 712				  struct meta_obj *dst)
 713{
 714	int shift = v->hdr.shift;
 715
 716	if (shift && shift < dst->len)
 717		dst->len -= shift;
 718}
 719
 720static int meta_var_dump(struct sk_buff *skb, struct meta_value *v, int tlv)
 721{
 722	if (v->val && v->len &&
 723	    nla_put(skb, tlv, v->len, (void *) v->val))
 724		goto nla_put_failure;
 725	return 0;
 726
 727nla_put_failure:
 728	return -1;
 729}
 730
 731/**************************************************************************
 732 * Type specific operations for TCF_META_TYPE_INT
 733 **************************************************************************/
 734
 735static int meta_int_compare(struct meta_obj *a, struct meta_obj *b)
 736{
  737	/* Let gcc optimize it; the unlikely() is not based on
  738	 * measured numbers, but jump-free code for mismatches
  739	 * seems more logical. */
 740	if (unlikely(a->value == b->value))
 741		return 0;
 742	else if (a->value < b->value)
 743		return -1;
 744	else
 745		return 1;
 746}
 747
 748static int meta_int_change(struct meta_value *dst, struct nlattr *nla)
 749{
 750	if (nla_len(nla) >= sizeof(unsigned long)) {
 751		dst->val = *(unsigned long *) nla_data(nla);
 752		dst->len = sizeof(unsigned long);
 753	} else if (nla_len(nla) == sizeof(u32)) {
 754		dst->val = nla_get_u32(nla);
 755		dst->len = sizeof(u32);
 756	} else
 757		return -EINVAL;
 758
 759	return 0;
 760}
 761
 762static void meta_int_apply_extras(struct meta_value *v,
 763				  struct meta_obj *dst)
 764{
 765	if (v->hdr.shift)
 766		dst->value >>= v->hdr.shift;
 767
 768	if (v->val)
 769		dst->value &= v->val;
 770}
 771
 772static int meta_int_dump(struct sk_buff *skb, struct meta_value *v, int tlv)
 773{
 774	if (v->len == sizeof(unsigned long)) {
 775		if (nla_put(skb, tlv, sizeof(unsigned long), &v->val))
 776			goto nla_put_failure;
 777	} else if (v->len == sizeof(u32)) {
 778		if (nla_put_u32(skb, tlv, v->val))
 779			goto nla_put_failure;
 780	}
 781
 782	return 0;
 783
 784nla_put_failure:
 785	return -1;
 786}
 787
 788/**************************************************************************
 789 * Type specific operations table
 790 **************************************************************************/
 791
 792struct meta_type_ops {
 793	void	(*destroy)(struct meta_value *);
 794	int	(*compare)(struct meta_obj *, struct meta_obj *);
 795	int	(*change)(struct meta_value *, struct nlattr *);
 796	void	(*apply_extras)(struct meta_value *, struct meta_obj *);
 797	int	(*dump)(struct sk_buff *, struct meta_value *, int);
 798};
 799
 800static const struct meta_type_ops __meta_type_ops[TCF_META_TYPE_MAX + 1] = {
 801	[TCF_META_TYPE_VAR] = {
 802		.destroy = meta_var_destroy,
 803		.compare = meta_var_compare,
 804		.change = meta_var_change,
 805		.apply_extras = meta_var_apply_extras,
 806		.dump = meta_var_dump
 807	},
 808	[TCF_META_TYPE_INT] = {
 809		.compare = meta_int_compare,
 810		.change = meta_int_change,
 811		.apply_extras = meta_int_apply_extras,
 812		.dump = meta_int_dump
 813	}
 814};
 815
 816static inline const struct meta_type_ops *meta_type_ops(struct meta_value *v)
 817{
 818	return &__meta_type_ops[meta_type(v)];
 819}
 820
 821/**************************************************************************
 822 * Core
 823 **************************************************************************/
 824
 825static int meta_get(struct sk_buff *skb, struct tcf_pkt_info *info,
 826		    struct meta_value *v, struct meta_obj *dst)
 827{
 828	int err = 0;
 829
 830	if (meta_id(v) == TCF_META_ID_VALUE) {
 831		dst->value = v->val;
 832		dst->len = v->len;
 833		return 0;
 834	}
 835
 836	meta_ops(v)->get(skb, info, v, dst, &err);
 837	if (err < 0)
 838		return err;
 839
 840	if (meta_type_ops(v)->apply_extras)
 841		meta_type_ops(v)->apply_extras(v, dst);
 842
 843	return 0;
 844}
 845
 846static int em_meta_match(struct sk_buff *skb, struct tcf_ematch *m,
 847			 struct tcf_pkt_info *info)
 848{
 849	int r;
 850	struct meta_match *meta = (struct meta_match *) m->data;
 851	struct meta_obj l_value, r_value;
 852
 853	if (meta_get(skb, info, &meta->lvalue, &l_value) < 0 ||
 854	    meta_get(skb, info, &meta->rvalue, &r_value) < 0)
 855		return 0;
 856
 857	r = meta_type_ops(&meta->lvalue)->compare(&l_value, &r_value);
 858
 859	switch (meta->lvalue.hdr.op) {
 860	case TCF_EM_OPND_EQ:
 861		return !r;
 862	case TCF_EM_OPND_LT:
 863		return r < 0;
 864	case TCF_EM_OPND_GT:
 865		return r > 0;
 866	}
 867
 868	return 0;
 869}
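/* Example: with an lvalue of id PRIORITY collected as 6 and an rvalue of id
 * VALUE equal to 6, the INT compare above returns 0, so TCF_EM_OPND_EQ
 * reports a match while TCF_EM_OPND_LT and TCF_EM_OPND_GT do not; any other
 * operand falls through to the final return 0, i.e. no match.
 */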
 870
 871static void meta_delete(struct meta_match *meta)
 872{
 873	if (meta) {
 874		const struct meta_type_ops *ops = meta_type_ops(&meta->lvalue);
 875
 876		if (ops && ops->destroy) {
 877			ops->destroy(&meta->lvalue);
 878			ops->destroy(&meta->rvalue);
 879		}
 880	}
 881
 882	kfree(meta);
 883}
 884
 885static inline int meta_change_data(struct meta_value *dst, struct nlattr *nla)
 886{
 887	if (nla) {
 888		if (nla_len(nla) == 0)
 889			return -EINVAL;
 890
 891		return meta_type_ops(dst)->change(dst, nla);
 892	}
 893
 894	return 0;
 895}
 896
 897static inline int meta_is_supported(struct meta_value *val)
 898{
 899	return !meta_id(val) || meta_ops(val)->get;
 900}
 901
 902static const struct nla_policy meta_policy[TCA_EM_META_MAX + 1] = {
 903	[TCA_EM_META_HDR]	= { .len = sizeof(struct tcf_meta_hdr) },
 904};
 905
 906static int em_meta_change(struct net *net, void *data, int len,
 907			  struct tcf_ematch *m)
 908{
 909	int err;
 910	struct nlattr *tb[TCA_EM_META_MAX + 1];
 911	struct tcf_meta_hdr *hdr;
 912	struct meta_match *meta = NULL;
 913
 914	err = nla_parse_deprecated(tb, TCA_EM_META_MAX, data, len,
 915				   meta_policy, NULL);
 916	if (err < 0)
 917		goto errout;
 918
 919	err = -EINVAL;
 920	if (tb[TCA_EM_META_HDR] == NULL)
 921		goto errout;
 922	hdr = nla_data(tb[TCA_EM_META_HDR]);
 923
 924	if (TCF_META_TYPE(hdr->left.kind) != TCF_META_TYPE(hdr->right.kind) ||
 925	    TCF_META_TYPE(hdr->left.kind) > TCF_META_TYPE_MAX ||
 926	    TCF_META_ID(hdr->left.kind) > TCF_META_ID_MAX ||
 927	    TCF_META_ID(hdr->right.kind) > TCF_META_ID_MAX)
 928		goto errout;
 929
 930	meta = kzalloc(sizeof(*meta), GFP_KERNEL);
 931	if (meta == NULL) {
 932		err = -ENOMEM;
 933		goto errout;
 934	}
 935
 936	memcpy(&meta->lvalue.hdr, &hdr->left, sizeof(hdr->left));
 937	memcpy(&meta->rvalue.hdr, &hdr->right, sizeof(hdr->right));
 938
 939	if (!meta_is_supported(&meta->lvalue) ||
 940	    !meta_is_supported(&meta->rvalue)) {
 941		err = -EOPNOTSUPP;
 942		goto errout;
 943	}
 944
 945	if (meta_change_data(&meta->lvalue, tb[TCA_EM_META_LVALUE]) < 0 ||
 946	    meta_change_data(&meta->rvalue, tb[TCA_EM_META_RVALUE]) < 0)
 947		goto errout;
 948
 949	m->datalen = sizeof(*meta);
 950	m->data = (unsigned long) meta;
 951
 952	err = 0;
 953errout:
 954	if (err && meta)
 955		meta_delete(meta);
 956	return err;
 957}
 958
 959static void em_meta_destroy(struct tcf_ematch *m)
 960{
 961	if (m)
 962		meta_delete((struct meta_match *) m->data);
 963}
 964
 965static int em_meta_dump(struct sk_buff *skb, struct tcf_ematch *em)
 966{
 967	struct meta_match *meta = (struct meta_match *) em->data;
 968	struct tcf_meta_hdr hdr;
 969	const struct meta_type_ops *ops;
 970
 971	memset(&hdr, 0, sizeof(hdr));
 972	memcpy(&hdr.left, &meta->lvalue.hdr, sizeof(hdr.left));
 973	memcpy(&hdr.right, &meta->rvalue.hdr, sizeof(hdr.right));
 974
 975	if (nla_put(skb, TCA_EM_META_HDR, sizeof(hdr), &hdr))
 976		goto nla_put_failure;
 977
 978	ops = meta_type_ops(&meta->lvalue);
 979	if (ops->dump(skb, &meta->lvalue, TCA_EM_META_LVALUE) < 0 ||
 980	    ops->dump(skb, &meta->rvalue, TCA_EM_META_RVALUE) < 0)
 981		goto nla_put_failure;
 982
 983	return 0;
 984
 985nla_put_failure:
 986	return -1;
 987}
 988
 989static struct tcf_ematch_ops em_meta_ops = {
 990	.kind	  = TCF_EM_META,
 991	.change	  = em_meta_change,
 992	.match	  = em_meta_match,
 993	.destroy  = em_meta_destroy,
 994	.dump	  = em_meta_dump,
 995	.owner	  = THIS_MODULE,
 996	.link	  = LIST_HEAD_INIT(em_meta_ops.link)
 997};
 998
 999static int __init init_em_meta(void)
1000{
1001	return tcf_em_register(&em_meta_ops);
1002}
1003
1004static void __exit exit_em_meta(void)
1005{
1006	tcf_em_unregister(&em_meta_ops);
1007}
1008
1009MODULE_DESCRIPTION("ematch classifier for various internal kernel metadata, skb metadata and sk metadata");
1010MODULE_LICENSE("GPL");
1011
1012module_init(init_em_meta);
1013module_exit(exit_em_meta);
1014
1015MODULE_ALIAS_TCF_EMATCH(TCF_EM_META);