   1// SPDX-License-Identifier: GPL-2.0-or-later
   2/*
   3 * net/sched/cls_flower.c		Flower classifier
   4 *
   5 * Copyright (c) 2015 Jiri Pirko <jiri@resnulli.us>
   6 */
   7
   8#include <linux/kernel.h>
   9#include <linux/init.h>
  10#include <linux/module.h>
  11#include <linux/rhashtable.h>
  12#include <linux/workqueue.h>
  13#include <linux/refcount.h>
  14
  15#include <linux/if_ether.h>
  16#include <linux/in6.h>
  17#include <linux/ip.h>
  18#include <linux/mpls.h>
  19
  20#include <net/sch_generic.h>
  21#include <net/pkt_cls.h>
  22#include <net/ip.h>
  23#include <net/flow_dissector.h>
  24#include <net/geneve.h>
  25#include <net/vxlan.h>
  26#include <net/erspan.h>
  27
  28#include <net/dst.h>
  29#include <net/dst_metadata.h>
  30
  31#include <uapi/linux/netfilter/nf_conntrack_common.h>
  32
  33struct fl_flow_key {
  34	struct flow_dissector_key_meta meta;
  35	struct flow_dissector_key_control control;
  36	struct flow_dissector_key_control enc_control;
  37	struct flow_dissector_key_basic basic;
  38	struct flow_dissector_key_eth_addrs eth;
  39	struct flow_dissector_key_vlan vlan;
  40	struct flow_dissector_key_vlan cvlan;
  41	union {
  42		struct flow_dissector_key_ipv4_addrs ipv4;
  43		struct flow_dissector_key_ipv6_addrs ipv6;
  44	};
  45	struct flow_dissector_key_ports tp;
  46	struct flow_dissector_key_icmp icmp;
  47	struct flow_dissector_key_arp arp;
  48	struct flow_dissector_key_keyid enc_key_id;
  49	union {
  50		struct flow_dissector_key_ipv4_addrs enc_ipv4;
  51		struct flow_dissector_key_ipv6_addrs enc_ipv6;
  52	};
  53	struct flow_dissector_key_ports enc_tp;
  54	struct flow_dissector_key_mpls mpls;
  55	struct flow_dissector_key_tcp tcp;
  56	struct flow_dissector_key_ip ip;
  57	struct flow_dissector_key_ip enc_ip;
  58	struct flow_dissector_key_enc_opts enc_opts;
  59	union {
  60		struct flow_dissector_key_ports tp;
  61		struct {
  62			struct flow_dissector_key_ports tp_min;
  63			struct flow_dissector_key_ports tp_max;
  64		};
  65	} tp_range;
  66	struct flow_dissector_key_ct ct;
  67	struct flow_dissector_key_hash hash;
  68} __aligned(BITS_PER_LONG / 8); /* Ensure that we can do comparisons as longs. */
  69
  70struct fl_flow_mask_range {
  71	unsigned short int start;
  72	unsigned short int end;
  73};
  74
  75struct fl_flow_mask {
  76	struct fl_flow_key key;
  77	struct fl_flow_mask_range range;
  78	u32 flags;
  79	struct rhash_head ht_node;
  80	struct rhashtable ht;
  81	struct rhashtable_params filter_ht_params;
  82	struct flow_dissector dissector;
  83	struct list_head filters;
  84	struct rcu_work rwork;
  85	struct list_head list;
  86	refcount_t refcnt;
  87};
  88
  89struct fl_flow_tmplt {
  90	struct fl_flow_key dummy_key;
  91	struct fl_flow_key mask;
  92	struct flow_dissector dissector;
  93	struct tcf_chain *chain;
  94};
  95
  96struct cls_fl_head {
  97	struct rhashtable ht;
  98	spinlock_t masks_lock; /* Protect masks list */
  99	struct list_head masks;
 100	struct list_head hw_filters;
 101	struct rcu_work rwork;
 102	struct idr handle_idr;
 103};
 104
 105struct cls_fl_filter {
 106	struct fl_flow_mask *mask;
 107	struct rhash_head ht_node;
 108	struct fl_flow_key mkey;
 109	struct tcf_exts exts;
 110	struct tcf_result res;
 111	struct fl_flow_key key;
 112	struct list_head list;
 113	struct list_head hw_list;
 114	u32 handle;
 115	u32 flags;
 116	u32 in_hw_count;
 117	struct rcu_work rwork;
 118	struct net_device *hw_dev;
  119	/* Flower classifier is unlocked, which means that its reference counter
  120	 * can be changed concurrently without any kind of external
  121	 * synchronization. Use an atomic reference counter to be concurrency-safe.
  122	 */
 123	refcount_t refcnt;
 124	bool deleted;
 125};
 126
 127static const struct rhashtable_params mask_ht_params = {
 128	.key_offset = offsetof(struct fl_flow_mask, key),
 129	.key_len = sizeof(struct fl_flow_key),
 130	.head_offset = offsetof(struct fl_flow_mask, ht_node),
 131	.automatic_shrinking = true,
 132};
 133
 134static unsigned short int fl_mask_range(const struct fl_flow_mask *mask)
 135{
 136	return mask->range.end - mask->range.start;
 137}
 138
 139static void fl_mask_update_range(struct fl_flow_mask *mask)
 140{
 141	const u8 *bytes = (const u8 *) &mask->key;
 142	size_t size = sizeof(mask->key);
 143	size_t i, first = 0, last;
 144
 145	for (i = 0; i < size; i++) {
 146		if (bytes[i]) {
 147			first = i;
 148			break;
 149		}
 150	}
 151	last = first;
 152	for (i = size - 1; i != first; i--) {
 153		if (bytes[i]) {
 154			last = i;
 155			break;
 156		}
 157	}
 158	mask->range.start = rounddown(first, sizeof(long));
 159	mask->range.end = roundup(last + 1, sizeof(long));
 160}
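/* Worked example: with sizeof(long) == 8, a mask whose first non-zero byte
 * sits at offset 13 and whose last sits at offset 42 yields range [8, 48).
 * Start is rounded down and end rounded up to long boundaries so that the
 * masking and hashing loops can walk the key one long word at a time.
 */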
 161
 162static void *fl_key_get_start(struct fl_flow_key *key,
 163			      const struct fl_flow_mask *mask)
 164{
 165	return (u8 *) key + mask->range.start;
 166}
 167
 168static void fl_set_masked_key(struct fl_flow_key *mkey, struct fl_flow_key *key,
 169			      struct fl_flow_mask *mask)
 170{
 171	const long *lkey = fl_key_get_start(key, mask);
 172	const long *lmask = fl_key_get_start(&mask->key, mask);
 173	long *lmkey = fl_key_get_start(mkey, mask);
 174	int i;
 175
 176	for (i = 0; i < fl_mask_range(mask); i += sizeof(long))
 177		*lmkey++ = *lkey++ & *lmask++;
 178}
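/* The masked key is what actually gets hashed: only bytes inside the mask's
 * [start, end) window are ANDed in, long word by long word; bytes outside
 * the window never take part in the lookup.
 */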
 179
 180static bool fl_mask_fits_tmplt(struct fl_flow_tmplt *tmplt,
 181			       struct fl_flow_mask *mask)
 182{
 183	const long *lmask = fl_key_get_start(&mask->key, mask);
 184	const long *ltmplt;
 185	int i;
 186
 187	if (!tmplt)
 188		return true;
 189	ltmplt = fl_key_get_start(&tmplt->mask, mask);
 190	for (i = 0; i < fl_mask_range(mask); i += sizeof(long)) {
 191		if (~*ltmplt++ & *lmask++)
 192			return false;
 193	}
 194	return true;
 195}
 196
 197static void fl_clear_masked_range(struct fl_flow_key *key,
 198				  struct fl_flow_mask *mask)
 199{
 200	memset(fl_key_get_start(key, mask), 0, fl_mask_range(mask));
 201}
 202
 203static bool fl_range_port_dst_cmp(struct cls_fl_filter *filter,
 204				  struct fl_flow_key *key,
 205				  struct fl_flow_key *mkey)
 206{
 207	__be16 min_mask, max_mask, min_val, max_val;
 208
 209	min_mask = htons(filter->mask->key.tp_range.tp_min.dst);
 210	max_mask = htons(filter->mask->key.tp_range.tp_max.dst);
 211	min_val = htons(filter->key.tp_range.tp_min.dst);
 212	max_val = htons(filter->key.tp_range.tp_max.dst);
 213
 214	if (min_mask && max_mask) {
 215		if (htons(key->tp_range.tp.dst) < min_val ||
 216		    htons(key->tp_range.tp.dst) > max_val)
 217			return false;
 218
 219		/* skb does not have min and max values */
 220		mkey->tp_range.tp_min.dst = filter->mkey.tp_range.tp_min.dst;
 221		mkey->tp_range.tp_max.dst = filter->mkey.tp_range.tp_max.dst;
 222	}
 223	return true;
 224}
 225
 226static bool fl_range_port_src_cmp(struct cls_fl_filter *filter,
 227				  struct fl_flow_key *key,
 228				  struct fl_flow_key *mkey)
 229{
 230	__be16 min_mask, max_mask, min_val, max_val;
 231
 232	min_mask = htons(filter->mask->key.tp_range.tp_min.src);
 233	max_mask = htons(filter->mask->key.tp_range.tp_max.src);
 234	min_val = htons(filter->key.tp_range.tp_min.src);
 235	max_val = htons(filter->key.tp_range.tp_max.src);
 236
 237	if (min_mask && max_mask) {
 238		if (htons(key->tp_range.tp.src) < min_val ||
 239		    htons(key->tp_range.tp.src) > max_val)
 240			return false;
 241
 242		/* skb does not have min and max values */
 243		mkey->tp_range.tp_min.src = filter->mkey.tp_range.tp_min.src;
 244		mkey->tp_range.tp_max.src = filter->mkey.tp_range.tp_max.src;
 245	}
 246	return true;
 247}
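/* Port ranges cannot be expressed as a bit mask, so fl_lookup_range() below
 * walks the mask's filter list: for each filter whose [min, max] window
 * contains the packet's port, the filter's own min/max values are copied
 * into the masked key (the skb carries none), so the hash lookup that
 * follows can only match that candidate.
 */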
 248
 249static struct cls_fl_filter *__fl_lookup(struct fl_flow_mask *mask,
 250					 struct fl_flow_key *mkey)
 251{
 252	return rhashtable_lookup_fast(&mask->ht, fl_key_get_start(mkey, mask),
 253				      mask->filter_ht_params);
 254}
 255
 256static struct cls_fl_filter *fl_lookup_range(struct fl_flow_mask *mask,
 257					     struct fl_flow_key *mkey,
 258					     struct fl_flow_key *key)
 259{
 260	struct cls_fl_filter *filter, *f;
 261
 262	list_for_each_entry_rcu(filter, &mask->filters, list) {
 263		if (!fl_range_port_dst_cmp(filter, key, mkey))
 264			continue;
 265
 266		if (!fl_range_port_src_cmp(filter, key, mkey))
 267			continue;
 268
 269		f = __fl_lookup(mask, mkey);
 270		if (f)
 271			return f;
 272	}
 273	return NULL;
 274}
 275
 276static noinline_for_stack
 277struct cls_fl_filter *fl_mask_lookup(struct fl_flow_mask *mask, struct fl_flow_key *key)
 278{
 279	struct fl_flow_key mkey;
 280
 281	fl_set_masked_key(&mkey, key, mask);
 282	if ((mask->flags & TCA_FLOWER_MASK_FLAGS_RANGE))
 283		return fl_lookup_range(mask, &mkey, key);
 284
 285	return __fl_lookup(mask, &mkey);
 286}
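/* fl_mask_lookup() is the per-mask fast path: build the masked key once,
 * then do either a plain hash lookup or, when the mask carries port ranges,
 * the linear candidate walk above.
 */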
 287
 288static u16 fl_ct_info_to_flower_map[] = {
 289	[IP_CT_ESTABLISHED] =		TCA_FLOWER_KEY_CT_FLAGS_TRACKED |
 290					TCA_FLOWER_KEY_CT_FLAGS_ESTABLISHED,
 291	[IP_CT_RELATED] =		TCA_FLOWER_KEY_CT_FLAGS_TRACKED |
 292					TCA_FLOWER_KEY_CT_FLAGS_RELATED,
 293	[IP_CT_ESTABLISHED_REPLY] =	TCA_FLOWER_KEY_CT_FLAGS_TRACKED |
 294					TCA_FLOWER_KEY_CT_FLAGS_ESTABLISHED,
 295	[IP_CT_RELATED_REPLY] =		TCA_FLOWER_KEY_CT_FLAGS_TRACKED |
 296					TCA_FLOWER_KEY_CT_FLAGS_RELATED,
 297	[IP_CT_NEW] =			TCA_FLOWER_KEY_CT_FLAGS_TRACKED |
 298					TCA_FLOWER_KEY_CT_FLAGS_NEW,
 299};
 300
 301static int fl_classify(struct sk_buff *skb, const struct tcf_proto *tp,
 302		       struct tcf_result *res)
 303{
 304	struct cls_fl_head *head = rcu_dereference_bh(tp->root);
 305	struct fl_flow_key skb_key;
 306	struct fl_flow_mask *mask;
 307	struct cls_fl_filter *f;
 308
 309	list_for_each_entry_rcu(mask, &head->masks, list) {
 310		flow_dissector_init_keys(&skb_key.control, &skb_key.basic);
 311		fl_clear_masked_range(&skb_key, mask);
 312
 313		skb_flow_dissect_meta(skb, &mask->dissector, &skb_key);
  314		/* skb_flow_dissect() does not set n_proto in case of an unknown
  315		 * protocol, so set it here instead.
  316		 */
 317		skb_key.basic.n_proto = skb_protocol(skb, false);
 318		skb_flow_dissect_tunnel_info(skb, &mask->dissector, &skb_key);
 319		skb_flow_dissect_ct(skb, &mask->dissector, &skb_key,
 320				    fl_ct_info_to_flower_map,
 321				    ARRAY_SIZE(fl_ct_info_to_flower_map));
 322		skb_flow_dissect_hash(skb, &mask->dissector, &skb_key);
 323		skb_flow_dissect(skb, &mask->dissector, &skb_key, 0);
 324
 325		f = fl_mask_lookup(mask, &skb_key);
 326		if (f && !tc_skip_sw(f->flags)) {
 327			*res = f->res;
 328			return tcf_exts_exec(skb, &f->exts, res);
 329		}
 330	}
 331	return -1;
 332}
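/* Classification walks the masks in list order and performs one masked hash
 * lookup per mask, so cost scales with the number of distinct masks rather
 * than the number of filters. A filter exercising this path could be set up
 * from userspace roughly like this (illustrative iproute2 flower syntax):
 *
 *	tc qdisc add dev eth0 clsact
 *	tc filter add dev eth0 ingress protocol ip flower \
 *		ip_proto tcp dst_port 80 action drop
 */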
 333
 334static int fl_init(struct tcf_proto *tp)
 335{
 336	struct cls_fl_head *head;
 337
 338	head = kzalloc(sizeof(*head), GFP_KERNEL);
 339	if (!head)
 340		return -ENOBUFS;
 341
 342	spin_lock_init(&head->masks_lock);
 343	INIT_LIST_HEAD_RCU(&head->masks);
 344	INIT_LIST_HEAD(&head->hw_filters);
 345	rcu_assign_pointer(tp->root, head);
 346	idr_init(&head->handle_idr);
 347
 348	return rhashtable_init(&head->ht, &mask_ht_params);
 349}
 350
 351static void fl_mask_free(struct fl_flow_mask *mask, bool mask_init_done)
 352{
 353	/* temporary masks don't have their filters list and ht initialized */
 354	if (mask_init_done) {
 355		WARN_ON(!list_empty(&mask->filters));
 356		rhashtable_destroy(&mask->ht);
 357	}
 358	kfree(mask);
 359}
 360
 361static void fl_mask_free_work(struct work_struct *work)
 362{
 363	struct fl_flow_mask *mask = container_of(to_rcu_work(work),
 364						 struct fl_flow_mask, rwork);
 365
 366	fl_mask_free(mask, true);
 367}
 368
 369static void fl_uninit_mask_free_work(struct work_struct *work)
 370{
 371	struct fl_flow_mask *mask = container_of(to_rcu_work(work),
 372						 struct fl_flow_mask, rwork);
 373
 374	fl_mask_free(mask, false);
 375}
 376
 377static bool fl_mask_put(struct cls_fl_head *head, struct fl_flow_mask *mask)
 378{
 379	if (!refcount_dec_and_test(&mask->refcnt))
 380		return false;
 381
 382	rhashtable_remove_fast(&head->ht, &mask->ht_node, mask_ht_params);
 383
 384	spin_lock(&head->masks_lock);
 385	list_del_rcu(&mask->list);
 386	spin_unlock(&head->masks_lock);
 387
 388	tcf_queue_work(&mask->rwork, fl_mask_free_work);
 389
 390	return true;
 391}
 392
 393static struct cls_fl_head *fl_head_dereference(struct tcf_proto *tp)
 394{
  395	/* Flower classifier only changes the root pointer during init and
  396	 * destroy. Users must obtain a reference to the tcf_proto instance
  397	 * before calling its API, so the tp->root pointer is protected from a
  398	 * concurrent call to fl_destroy() by reference counting.
  399	 */
 400	return rcu_dereference_raw(tp->root);
 401}
 402
 403static void __fl_destroy_filter(struct cls_fl_filter *f)
 404{
 405	tcf_exts_destroy(&f->exts);
 406	tcf_exts_put_net(&f->exts);
 407	kfree(f);
 408}
 409
 410static void fl_destroy_filter_work(struct work_struct *work)
 411{
 412	struct cls_fl_filter *f = container_of(to_rcu_work(work),
 413					struct cls_fl_filter, rwork);
 414
 415	__fl_destroy_filter(f);
 416}
 417
 418static void fl_hw_destroy_filter(struct tcf_proto *tp, struct cls_fl_filter *f,
 419				 bool rtnl_held, struct netlink_ext_ack *extack)
 420{
 421	struct tcf_block *block = tp->chain->block;
 422	struct flow_cls_offload cls_flower = {};
 423
 424	tc_cls_common_offload_init(&cls_flower.common, tp, f->flags, extack);
 425	cls_flower.command = FLOW_CLS_DESTROY;
 426	cls_flower.cookie = (unsigned long) f;
 427
 428	tc_setup_cb_destroy(block, tp, TC_SETUP_CLSFLOWER, &cls_flower, false,
 429			    &f->flags, &f->in_hw_count, rtnl_held);
 430
 431}
 432
 433static int fl_hw_replace_filter(struct tcf_proto *tp,
 434				struct cls_fl_filter *f, bool rtnl_held,
 435				struct netlink_ext_ack *extack)
 436{
 437	struct tcf_block *block = tp->chain->block;
 438	struct flow_cls_offload cls_flower = {};
 439	bool skip_sw = tc_skip_sw(f->flags);
 440	int err = 0;
 441
 442	cls_flower.rule = flow_rule_alloc(tcf_exts_num_actions(&f->exts));
 443	if (!cls_flower.rule)
 444		return -ENOMEM;
 445
 446	tc_cls_common_offload_init(&cls_flower.common, tp, f->flags, extack);
 447	cls_flower.command = FLOW_CLS_REPLACE;
 448	cls_flower.cookie = (unsigned long) f;
 449	cls_flower.rule->match.dissector = &f->mask->dissector;
 450	cls_flower.rule->match.mask = &f->mask->key;
 451	cls_flower.rule->match.key = &f->mkey;
 452	cls_flower.classid = f->res.classid;
 453
 454	err = tc_setup_flow_action(&cls_flower.rule->action, &f->exts);
 455	if (err) {
 456		kfree(cls_flower.rule);
 457		if (skip_sw) {
 458			NL_SET_ERR_MSG_MOD(extack, "Failed to setup flow action");
 459			return err;
 460		}
 461		return 0;
 462	}
 463
 464	err = tc_setup_cb_add(block, tp, TC_SETUP_CLSFLOWER, &cls_flower,
 465			      skip_sw, &f->flags, &f->in_hw_count, rtnl_held);
 466	tc_cleanup_flow_action(&cls_flower.rule->action);
 467	kfree(cls_flower.rule);
 468
 469	if (err) {
 470		fl_hw_destroy_filter(tp, f, rtnl_held, NULL);
 471		return err;
 472	}
 473
 474	if (skip_sw && !(f->flags & TCA_CLS_FLAGS_IN_HW))
 475		return -EINVAL;
 476
 477	return 0;
 478}
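/* Failure semantics: if building the flow_rule fails, the error is fatal
 * only when skip_sw is set (there is no software fallback); otherwise the
 * filter simply stays software-only. After the driver callbacks run, a
 * skip_sw filter that no driver accepted (TCA_CLS_FLAGS_IN_HW unset) is
 * rejected with -EINVAL.
 */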
 479
 480static void fl_hw_update_stats(struct tcf_proto *tp, struct cls_fl_filter *f,
 481			       bool rtnl_held)
 482{
 483	struct tcf_block *block = tp->chain->block;
 484	struct flow_cls_offload cls_flower = {};
 485
 486	tc_cls_common_offload_init(&cls_flower.common, tp, f->flags, NULL);
 487	cls_flower.command = FLOW_CLS_STATS;
 488	cls_flower.cookie = (unsigned long) f;
 489	cls_flower.classid = f->res.classid;
 490
 491	tc_setup_cb_call(block, TC_SETUP_CLSFLOWER, &cls_flower, false,
 492			 rtnl_held);
 493
 494	tcf_exts_stats_update(&f->exts, cls_flower.stats.bytes,
 495			      cls_flower.stats.pkts,
 496			      cls_flower.stats.drops,
 497			      cls_flower.stats.lastused,
 498			      cls_flower.stats.used_hw_stats,
 499			      cls_flower.stats.used_hw_stats_valid);
 500}
 501
 502static void __fl_put(struct cls_fl_filter *f)
 503{
 504	if (!refcount_dec_and_test(&f->refcnt))
 505		return;
 506
 507	if (tcf_exts_get_net(&f->exts))
 508		tcf_queue_work(&f->rwork, fl_destroy_filter_work);
 509	else
 510		__fl_destroy_filter(f);
 511}
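/* Final destruction is deferred via tcf_queue_work() so that it happens
 * after an RCU grace period, letting concurrent readers on the
 * classification path finish; if the netns is already gone, the filter is
 * freed immediately.
 */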
 512
 513static struct cls_fl_filter *__fl_get(struct cls_fl_head *head, u32 handle)
 514{
 515	struct cls_fl_filter *f;
 516
 517	rcu_read_lock();
 518	f = idr_find(&head->handle_idr, handle);
 519	if (f && !refcount_inc_not_zero(&f->refcnt))
 520		f = NULL;
 521	rcu_read_unlock();
 522
 523	return f;
 524}
 525
 526static int __fl_delete(struct tcf_proto *tp, struct cls_fl_filter *f,
 527		       bool *last, bool rtnl_held,
 528		       struct netlink_ext_ack *extack)
 529{
 530	struct cls_fl_head *head = fl_head_dereference(tp);
 531
 532	*last = false;
 533
 534	spin_lock(&tp->lock);
 535	if (f->deleted) {
 536		spin_unlock(&tp->lock);
 537		return -ENOENT;
 538	}
 539
 540	f->deleted = true;
 541	rhashtable_remove_fast(&f->mask->ht, &f->ht_node,
 542			       f->mask->filter_ht_params);
 543	idr_remove(&head->handle_idr, f->handle);
 544	list_del_rcu(&f->list);
 545	spin_unlock(&tp->lock);
 546
 547	*last = fl_mask_put(head, f->mask);
 548	if (!tc_skip_hw(f->flags))
 549		fl_hw_destroy_filter(tp, f, rtnl_held, extack);
 550	tcf_unbind_filter(tp, &f->res);
 551	__fl_put(f);
 552
 553	return 0;
 554}
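/* Deletion order matters: the filter is marked deleted and unhashed under
 * tp->lock first, so concurrent writers observe a consistent state; only
 * then are the hardware entry and the classifier's reference dropped.
 * *last reports whether this removed the final filter using f->mask.
 */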
 555
 556static void fl_destroy_sleepable(struct work_struct *work)
 557{
 558	struct cls_fl_head *head = container_of(to_rcu_work(work),
 559						struct cls_fl_head,
 560						rwork);
 561
 562	rhashtable_destroy(&head->ht);
 563	kfree(head);
 564	module_put(THIS_MODULE);
 565}
 566
 567static void fl_destroy(struct tcf_proto *tp, bool rtnl_held,
 568		       struct netlink_ext_ack *extack)
 569{
 570	struct cls_fl_head *head = fl_head_dereference(tp);
 571	struct fl_flow_mask *mask, *next_mask;
 572	struct cls_fl_filter *f, *next;
 573	bool last;
 574
 575	list_for_each_entry_safe(mask, next_mask, &head->masks, list) {
 576		list_for_each_entry_safe(f, next, &mask->filters, list) {
 577			__fl_delete(tp, f, &last, rtnl_held, extack);
 578			if (last)
 579				break;
 580		}
 581	}
 582	idr_destroy(&head->handle_idr);
 583
 584	__module_get(THIS_MODULE);
 585	tcf_queue_work(&head->rwork, fl_destroy_sleepable);
 586}
 587
 588static void fl_put(struct tcf_proto *tp, void *arg)
 589{
 590	struct cls_fl_filter *f = arg;
 591
 592	__fl_put(f);
 593}
 594
 595static void *fl_get(struct tcf_proto *tp, u32 handle)
 596{
 597	struct cls_fl_head *head = fl_head_dereference(tp);
 598
 599	return __fl_get(head, handle);
 600}
 601
 602static const struct nla_policy fl_policy[TCA_FLOWER_MAX + 1] = {
 603	[TCA_FLOWER_UNSPEC]		= { .type = NLA_UNSPEC },
 604	[TCA_FLOWER_CLASSID]		= { .type = NLA_U32 },
 605	[TCA_FLOWER_INDEV]		= { .type = NLA_STRING,
 606					    .len = IFNAMSIZ },
 607	[TCA_FLOWER_KEY_ETH_DST]	= { .len = ETH_ALEN },
 608	[TCA_FLOWER_KEY_ETH_DST_MASK]	= { .len = ETH_ALEN },
 609	[TCA_FLOWER_KEY_ETH_SRC]	= { .len = ETH_ALEN },
 610	[TCA_FLOWER_KEY_ETH_SRC_MASK]	= { .len = ETH_ALEN },
 611	[TCA_FLOWER_KEY_ETH_TYPE]	= { .type = NLA_U16 },
 612	[TCA_FLOWER_KEY_IP_PROTO]	= { .type = NLA_U8 },
 613	[TCA_FLOWER_KEY_IPV4_SRC]	= { .type = NLA_U32 },
 614	[TCA_FLOWER_KEY_IPV4_SRC_MASK]	= { .type = NLA_U32 },
 615	[TCA_FLOWER_KEY_IPV4_DST]	= { .type = NLA_U32 },
 616	[TCA_FLOWER_KEY_IPV4_DST_MASK]	= { .type = NLA_U32 },
 617	[TCA_FLOWER_KEY_IPV6_SRC]	= { .len = sizeof(struct in6_addr) },
 618	[TCA_FLOWER_KEY_IPV6_SRC_MASK]	= { .len = sizeof(struct in6_addr) },
 619	[TCA_FLOWER_KEY_IPV6_DST]	= { .len = sizeof(struct in6_addr) },
 620	[TCA_FLOWER_KEY_IPV6_DST_MASK]	= { .len = sizeof(struct in6_addr) },
 621	[TCA_FLOWER_KEY_TCP_SRC]	= { .type = NLA_U16 },
 622	[TCA_FLOWER_KEY_TCP_DST]	= { .type = NLA_U16 },
 623	[TCA_FLOWER_KEY_UDP_SRC]	= { .type = NLA_U16 },
 624	[TCA_FLOWER_KEY_UDP_DST]	= { .type = NLA_U16 },
 625	[TCA_FLOWER_KEY_VLAN_ID]	= { .type = NLA_U16 },
 626	[TCA_FLOWER_KEY_VLAN_PRIO]	= { .type = NLA_U8 },
 627	[TCA_FLOWER_KEY_VLAN_ETH_TYPE]	= { .type = NLA_U16 },
 628	[TCA_FLOWER_KEY_ENC_KEY_ID]	= { .type = NLA_U32 },
 629	[TCA_FLOWER_KEY_ENC_IPV4_SRC]	= { .type = NLA_U32 },
 630	[TCA_FLOWER_KEY_ENC_IPV4_SRC_MASK] = { .type = NLA_U32 },
 631	[TCA_FLOWER_KEY_ENC_IPV4_DST]	= { .type = NLA_U32 },
 632	[TCA_FLOWER_KEY_ENC_IPV4_DST_MASK] = { .type = NLA_U32 },
 633	[TCA_FLOWER_KEY_ENC_IPV6_SRC]	= { .len = sizeof(struct in6_addr) },
 634	[TCA_FLOWER_KEY_ENC_IPV6_SRC_MASK] = { .len = sizeof(struct in6_addr) },
 635	[TCA_FLOWER_KEY_ENC_IPV6_DST]	= { .len = sizeof(struct in6_addr) },
 636	[TCA_FLOWER_KEY_ENC_IPV6_DST_MASK] = { .len = sizeof(struct in6_addr) },
 637	[TCA_FLOWER_KEY_TCP_SRC_MASK]	= { .type = NLA_U16 },
 638	[TCA_FLOWER_KEY_TCP_DST_MASK]	= { .type = NLA_U16 },
 639	[TCA_FLOWER_KEY_UDP_SRC_MASK]	= { .type = NLA_U16 },
 640	[TCA_FLOWER_KEY_UDP_DST_MASK]	= { .type = NLA_U16 },
 641	[TCA_FLOWER_KEY_SCTP_SRC_MASK]	= { .type = NLA_U16 },
 642	[TCA_FLOWER_KEY_SCTP_DST_MASK]	= { .type = NLA_U16 },
 643	[TCA_FLOWER_KEY_SCTP_SRC]	= { .type = NLA_U16 },
 644	[TCA_FLOWER_KEY_SCTP_DST]	= { .type = NLA_U16 },
 645	[TCA_FLOWER_KEY_ENC_UDP_SRC_PORT]	= { .type = NLA_U16 },
 646	[TCA_FLOWER_KEY_ENC_UDP_SRC_PORT_MASK]	= { .type = NLA_U16 },
 647	[TCA_FLOWER_KEY_ENC_UDP_DST_PORT]	= { .type = NLA_U16 },
 648	[TCA_FLOWER_KEY_ENC_UDP_DST_PORT_MASK]	= { .type = NLA_U16 },
 649	[TCA_FLOWER_KEY_FLAGS]		= { .type = NLA_U32 },
 650	[TCA_FLOWER_KEY_FLAGS_MASK]	= { .type = NLA_U32 },
 651	[TCA_FLOWER_KEY_ICMPV4_TYPE]	= { .type = NLA_U8 },
 652	[TCA_FLOWER_KEY_ICMPV4_TYPE_MASK] = { .type = NLA_U8 },
 653	[TCA_FLOWER_KEY_ICMPV4_CODE]	= { .type = NLA_U8 },
 654	[TCA_FLOWER_KEY_ICMPV4_CODE_MASK] = { .type = NLA_U8 },
 655	[TCA_FLOWER_KEY_ICMPV6_TYPE]	= { .type = NLA_U8 },
 656	[TCA_FLOWER_KEY_ICMPV6_TYPE_MASK] = { .type = NLA_U8 },
 657	[TCA_FLOWER_KEY_ICMPV6_CODE]	= { .type = NLA_U8 },
 658	[TCA_FLOWER_KEY_ICMPV6_CODE_MASK] = { .type = NLA_U8 },
 659	[TCA_FLOWER_KEY_ARP_SIP]	= { .type = NLA_U32 },
 660	[TCA_FLOWER_KEY_ARP_SIP_MASK]	= { .type = NLA_U32 },
 661	[TCA_FLOWER_KEY_ARP_TIP]	= { .type = NLA_U32 },
 662	[TCA_FLOWER_KEY_ARP_TIP_MASK]	= { .type = NLA_U32 },
 663	[TCA_FLOWER_KEY_ARP_OP]		= { .type = NLA_U8 },
 664	[TCA_FLOWER_KEY_ARP_OP_MASK]	= { .type = NLA_U8 },
 665	[TCA_FLOWER_KEY_ARP_SHA]	= { .len = ETH_ALEN },
 666	[TCA_FLOWER_KEY_ARP_SHA_MASK]	= { .len = ETH_ALEN },
 667	[TCA_FLOWER_KEY_ARP_THA]	= { .len = ETH_ALEN },
 668	[TCA_FLOWER_KEY_ARP_THA_MASK]	= { .len = ETH_ALEN },
 669	[TCA_FLOWER_KEY_MPLS_TTL]	= { .type = NLA_U8 },
 670	[TCA_FLOWER_KEY_MPLS_BOS]	= { .type = NLA_U8 },
 671	[TCA_FLOWER_KEY_MPLS_TC]	= { .type = NLA_U8 },
 672	[TCA_FLOWER_KEY_MPLS_LABEL]	= { .type = NLA_U32 },
 673	[TCA_FLOWER_KEY_MPLS_OPTS]	= { .type = NLA_NESTED },
 674	[TCA_FLOWER_KEY_TCP_FLAGS]	= { .type = NLA_U16 },
 675	[TCA_FLOWER_KEY_TCP_FLAGS_MASK]	= { .type = NLA_U16 },
 676	[TCA_FLOWER_KEY_IP_TOS]		= { .type = NLA_U8 },
 677	[TCA_FLOWER_KEY_IP_TOS_MASK]	= { .type = NLA_U8 },
 678	[TCA_FLOWER_KEY_IP_TTL]		= { .type = NLA_U8 },
 679	[TCA_FLOWER_KEY_IP_TTL_MASK]	= { .type = NLA_U8 },
 680	[TCA_FLOWER_KEY_CVLAN_ID]	= { .type = NLA_U16 },
 681	[TCA_FLOWER_KEY_CVLAN_PRIO]	= { .type = NLA_U8 },
 682	[TCA_FLOWER_KEY_CVLAN_ETH_TYPE]	= { .type = NLA_U16 },
 683	[TCA_FLOWER_KEY_ENC_IP_TOS]	= { .type = NLA_U8 },
 684	[TCA_FLOWER_KEY_ENC_IP_TOS_MASK] = { .type = NLA_U8 },
 685	[TCA_FLOWER_KEY_ENC_IP_TTL]	 = { .type = NLA_U8 },
 686	[TCA_FLOWER_KEY_ENC_IP_TTL_MASK] = { .type = NLA_U8 },
 687	[TCA_FLOWER_KEY_ENC_OPTS]	= { .type = NLA_NESTED },
 688	[TCA_FLOWER_KEY_ENC_OPTS_MASK]	= { .type = NLA_NESTED },
 689	[TCA_FLOWER_KEY_CT_STATE]	= { .type = NLA_U16 },
 690	[TCA_FLOWER_KEY_CT_STATE_MASK]	= { .type = NLA_U16 },
 691	[TCA_FLOWER_KEY_CT_ZONE]	= { .type = NLA_U16 },
 692	[TCA_FLOWER_KEY_CT_ZONE_MASK]	= { .type = NLA_U16 },
 693	[TCA_FLOWER_KEY_CT_MARK]	= { .type = NLA_U32 },
 694	[TCA_FLOWER_KEY_CT_MARK_MASK]	= { .type = NLA_U32 },
 695	[TCA_FLOWER_KEY_CT_LABELS]	= { .type = NLA_BINARY,
 696					    .len = 128 / BITS_PER_BYTE },
 697	[TCA_FLOWER_KEY_CT_LABELS_MASK]	= { .type = NLA_BINARY,
 698					    .len = 128 / BITS_PER_BYTE },
 699	[TCA_FLOWER_FLAGS]		= { .type = NLA_U32 },
 700	[TCA_FLOWER_KEY_HASH]		= { .type = NLA_U32 },
 701	[TCA_FLOWER_KEY_HASH_MASK]	= { .type = NLA_U32 },
 702
 703};
 704
 705static const struct nla_policy
 706enc_opts_policy[TCA_FLOWER_KEY_ENC_OPTS_MAX + 1] = {
 707	[TCA_FLOWER_KEY_ENC_OPTS_UNSPEC]        = {
 708		.strict_start_type = TCA_FLOWER_KEY_ENC_OPTS_VXLAN },
 709	[TCA_FLOWER_KEY_ENC_OPTS_GENEVE]        = { .type = NLA_NESTED },
 710	[TCA_FLOWER_KEY_ENC_OPTS_VXLAN]         = { .type = NLA_NESTED },
 711	[TCA_FLOWER_KEY_ENC_OPTS_ERSPAN]        = { .type = NLA_NESTED },
 712};
 713
 714static const struct nla_policy
 715geneve_opt_policy[TCA_FLOWER_KEY_ENC_OPT_GENEVE_MAX + 1] = {
 716	[TCA_FLOWER_KEY_ENC_OPT_GENEVE_CLASS]      = { .type = NLA_U16 },
 717	[TCA_FLOWER_KEY_ENC_OPT_GENEVE_TYPE]       = { .type = NLA_U8 },
 718	[TCA_FLOWER_KEY_ENC_OPT_GENEVE_DATA]       = { .type = NLA_BINARY,
 719						       .len = 128 },
 720};
 721
 722static const struct nla_policy
 723vxlan_opt_policy[TCA_FLOWER_KEY_ENC_OPT_VXLAN_MAX + 1] = {
 724	[TCA_FLOWER_KEY_ENC_OPT_VXLAN_GBP]         = { .type = NLA_U32 },
 725};
 726
 727static const struct nla_policy
 728erspan_opt_policy[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_MAX + 1] = {
 729	[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_VER]        = { .type = NLA_U8 },
 730	[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_INDEX]      = { .type = NLA_U32 },
 731	[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_DIR]        = { .type = NLA_U8 },
 732	[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_HWID]       = { .type = NLA_U8 },
 733};
 734
 735static const struct nla_policy
 736mpls_stack_entry_policy[TCA_FLOWER_KEY_MPLS_OPT_LSE_MAX + 1] = {
 737	[TCA_FLOWER_KEY_MPLS_OPT_LSE_DEPTH]    = { .type = NLA_U8 },
 738	[TCA_FLOWER_KEY_MPLS_OPT_LSE_TTL]      = { .type = NLA_U8 },
 739	[TCA_FLOWER_KEY_MPLS_OPT_LSE_BOS]      = { .type = NLA_U8 },
 740	[TCA_FLOWER_KEY_MPLS_OPT_LSE_TC]       = { .type = NLA_U8 },
 741	[TCA_FLOWER_KEY_MPLS_OPT_LSE_LABEL]    = { .type = NLA_U32 },
 742};
 743
 744static void fl_set_key_val(struct nlattr **tb,
 745			   void *val, int val_type,
 746			   void *mask, int mask_type, int len)
 747{
 748	if (!tb[val_type])
 749		return;
 750	nla_memcpy(val, tb[val_type], len);
 751	if (mask_type == TCA_FLOWER_UNSPEC || !tb[mask_type])
 752		memset(mask, 0xff, len);
 753	else
 754		nla_memcpy(mask, tb[mask_type], len);
 755}
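/* Convention used by all the fl_set_key_*() helpers below: a value
 * attribute supplied without a matching mask attribute (or with mask type
 * TCA_FLOWER_UNSPEC) means an exact match, so the mask defaults to all ones.
 */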
 756
 757static int fl_set_key_port_range(struct nlattr **tb, struct fl_flow_key *key,
 758				 struct fl_flow_key *mask,
 759				 struct netlink_ext_ack *extack)
 760{
 761	fl_set_key_val(tb, &key->tp_range.tp_min.dst,
 762		       TCA_FLOWER_KEY_PORT_DST_MIN, &mask->tp_range.tp_min.dst,
 763		       TCA_FLOWER_UNSPEC, sizeof(key->tp_range.tp_min.dst));
 764	fl_set_key_val(tb, &key->tp_range.tp_max.dst,
 765		       TCA_FLOWER_KEY_PORT_DST_MAX, &mask->tp_range.tp_max.dst,
 766		       TCA_FLOWER_UNSPEC, sizeof(key->tp_range.tp_max.dst));
 767	fl_set_key_val(tb, &key->tp_range.tp_min.src,
 768		       TCA_FLOWER_KEY_PORT_SRC_MIN, &mask->tp_range.tp_min.src,
 769		       TCA_FLOWER_UNSPEC, sizeof(key->tp_range.tp_min.src));
 770	fl_set_key_val(tb, &key->tp_range.tp_max.src,
 771		       TCA_FLOWER_KEY_PORT_SRC_MAX, &mask->tp_range.tp_max.src,
 772		       TCA_FLOWER_UNSPEC, sizeof(key->tp_range.tp_max.src));
 773
 774	if (mask->tp_range.tp_min.dst && mask->tp_range.tp_max.dst &&
 775	    htons(key->tp_range.tp_max.dst) <=
 776	    htons(key->tp_range.tp_min.dst)) {
 777		NL_SET_ERR_MSG_ATTR(extack,
 778				    tb[TCA_FLOWER_KEY_PORT_DST_MIN],
 779				    "Invalid destination port range (min must be strictly smaller than max)");
 780		return -EINVAL;
 781	}
 782	if (mask->tp_range.tp_min.src && mask->tp_range.tp_max.src &&
 783	    htons(key->tp_range.tp_max.src) <=
 784	    htons(key->tp_range.tp_min.src)) {
 785		NL_SET_ERR_MSG_ATTR(extack,
 786				    tb[TCA_FLOWER_KEY_PORT_SRC_MIN],
 787				    "Invalid source port range (min must be strictly smaller than max)");
 788		return -EINVAL;
 789	}
 790
 791	return 0;
 792}
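/* A zero min or max mask disables the range check for that direction; when
 * both endpoints are masked in, min must be strictly below max or the
 * filter is rejected.
 */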
 793
 794static int fl_set_key_mpls_lse(const struct nlattr *nla_lse,
 795			       struct flow_dissector_key_mpls *key_val,
 796			       struct flow_dissector_key_mpls *key_mask,
 797			       struct netlink_ext_ack *extack)
 798{
 799	struct nlattr *tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_MAX + 1];
 800	struct flow_dissector_mpls_lse *lse_mask;
 801	struct flow_dissector_mpls_lse *lse_val;
 802	u8 lse_index;
 803	u8 depth;
 804	int err;
 805
 806	err = nla_parse_nested(tb, TCA_FLOWER_KEY_MPLS_OPT_LSE_MAX, nla_lse,
 807			       mpls_stack_entry_policy, extack);
 808	if (err < 0)
 809		return err;
 810
 811	if (!tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_DEPTH]) {
 812		NL_SET_ERR_MSG(extack, "Missing MPLS option \"depth\"");
 813		return -EINVAL;
 814	}
 815
 816	depth = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_DEPTH]);
 817
 818	/* LSE depth starts at 1, for consistency with terminology used by
 819	 * RFC 3031 (section 3.9), where depth 0 refers to unlabeled packets.
 820	 */
 821	if (depth < 1 || depth > FLOW_DIS_MPLS_MAX) {
 822		NL_SET_ERR_MSG_ATTR(extack,
 823				    tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_DEPTH],
 824				    "Invalid MPLS depth");
 825		return -EINVAL;
 826	}
 827	lse_index = depth - 1;
 828
 829	dissector_set_mpls_lse(key_val, lse_index);
 830	dissector_set_mpls_lse(key_mask, lse_index);
 831
 832	lse_val = &key_val->ls[lse_index];
 833	lse_mask = &key_mask->ls[lse_index];
 834
 835	if (tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_TTL]) {
 836		lse_val->mpls_ttl = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_TTL]);
 837		lse_mask->mpls_ttl = MPLS_TTL_MASK;
 838	}
 839	if (tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_BOS]) {
 840		u8 bos = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_BOS]);
 841
 842		if (bos & ~MPLS_BOS_MASK) {
 843			NL_SET_ERR_MSG_ATTR(extack,
 844					    tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_BOS],
 845					    "Bottom Of Stack (BOS) must be 0 or 1");
 846			return -EINVAL;
 847		}
 848		lse_val->mpls_bos = bos;
 849		lse_mask->mpls_bos = MPLS_BOS_MASK;
 850	}
 851	if (tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_TC]) {
 852		u8 tc = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_TC]);
 853
 854		if (tc & ~MPLS_TC_MASK) {
 855			NL_SET_ERR_MSG_ATTR(extack,
 856					    tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_TC],
 857					    "Traffic Class (TC) must be between 0 and 7");
 858			return -EINVAL;
 859		}
 860		lse_val->mpls_tc = tc;
 861		lse_mask->mpls_tc = MPLS_TC_MASK;
 862	}
 863	if (tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_LABEL]) {
 864		u32 label = nla_get_u32(tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_LABEL]);
 865
 866		if (label & ~MPLS_LABEL_MASK) {
 867			NL_SET_ERR_MSG_ATTR(extack,
 868					    tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_LABEL],
 869					    "Label must be between 0 and 1048575");
 870			return -EINVAL;
 871		}
 872		lse_val->mpls_label = label;
 873		lse_mask->mpls_label = MPLS_LABEL_MASK;
 874	}
 875
 876	return 0;
 877}
 878
 879static int fl_set_key_mpls_opts(const struct nlattr *nla_mpls_opts,
 880				struct flow_dissector_key_mpls *key_val,
 881				struct flow_dissector_key_mpls *key_mask,
 882				struct netlink_ext_ack *extack)
 883{
 884	struct nlattr *nla_lse;
 885	int rem;
 886	int err;
 887
 888	if (!(nla_mpls_opts->nla_type & NLA_F_NESTED)) {
 889		NL_SET_ERR_MSG_ATTR(extack, nla_mpls_opts,
 890				    "NLA_F_NESTED is missing");
 891		return -EINVAL;
 892	}
 893
 894	nla_for_each_nested(nla_lse, nla_mpls_opts, rem) {
 895		if (nla_type(nla_lse) != TCA_FLOWER_KEY_MPLS_OPTS_LSE) {
 896			NL_SET_ERR_MSG_ATTR(extack, nla_lse,
 897					    "Invalid MPLS option type");
 898			return -EINVAL;
 899		}
 900
 901		err = fl_set_key_mpls_lse(nla_lse, key_val, key_mask, extack);
 902		if (err < 0)
 903			return err;
 904	}
 905	if (rem) {
 906		NL_SET_ERR_MSG(extack,
 907			       "Bytes leftover after parsing MPLS options");
 908		return -EINVAL;
 909	}
 910
 911	return 0;
 912}
 913
 914static int fl_set_key_mpls(struct nlattr **tb,
 915			   struct flow_dissector_key_mpls *key_val,
 916			   struct flow_dissector_key_mpls *key_mask,
 917			   struct netlink_ext_ack *extack)
 918{
 919	struct flow_dissector_mpls_lse *lse_mask;
 920	struct flow_dissector_mpls_lse *lse_val;
 921
 922	if (tb[TCA_FLOWER_KEY_MPLS_OPTS]) {
 923		if (tb[TCA_FLOWER_KEY_MPLS_TTL] ||
 924		    tb[TCA_FLOWER_KEY_MPLS_BOS] ||
 925		    tb[TCA_FLOWER_KEY_MPLS_TC] ||
 926		    tb[TCA_FLOWER_KEY_MPLS_LABEL]) {
 927			NL_SET_ERR_MSG_ATTR(extack,
 928					    tb[TCA_FLOWER_KEY_MPLS_OPTS],
 929					    "MPLS label, Traffic Class, Bottom Of Stack and Time To Live must be encapsulated in the MPLS options attribute");
 930			return -EBADMSG;
 931		}
 932
 933		return fl_set_key_mpls_opts(tb[TCA_FLOWER_KEY_MPLS_OPTS],
 934					    key_val, key_mask, extack);
 935	}
 936
 937	lse_val = &key_val->ls[0];
 938	lse_mask = &key_mask->ls[0];
 939
 940	if (tb[TCA_FLOWER_KEY_MPLS_TTL]) {
 941		lse_val->mpls_ttl = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_TTL]);
 942		lse_mask->mpls_ttl = MPLS_TTL_MASK;
 943		dissector_set_mpls_lse(key_val, 0);
 944		dissector_set_mpls_lse(key_mask, 0);
 945	}
 946	if (tb[TCA_FLOWER_KEY_MPLS_BOS]) {
 947		u8 bos = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_BOS]);
 948
 949		if (bos & ~MPLS_BOS_MASK) {
 950			NL_SET_ERR_MSG_ATTR(extack,
 951					    tb[TCA_FLOWER_KEY_MPLS_BOS],
 952					    "Bottom Of Stack (BOS) must be 0 or 1");
 953			return -EINVAL;
 954		}
 955		lse_val->mpls_bos = bos;
 956		lse_mask->mpls_bos = MPLS_BOS_MASK;
 957		dissector_set_mpls_lse(key_val, 0);
 958		dissector_set_mpls_lse(key_mask, 0);
 959	}
 960	if (tb[TCA_FLOWER_KEY_MPLS_TC]) {
 961		u8 tc = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_TC]);
 962
 963		if (tc & ~MPLS_TC_MASK) {
 964			NL_SET_ERR_MSG_ATTR(extack,
 965					    tb[TCA_FLOWER_KEY_MPLS_TC],
 966					    "Traffic Class (TC) must be between 0 and 7");
 967			return -EINVAL;
 968		}
 969		lse_val->mpls_tc = tc;
 970		lse_mask->mpls_tc = MPLS_TC_MASK;
 971		dissector_set_mpls_lse(key_val, 0);
 972		dissector_set_mpls_lse(key_mask, 0);
 973	}
 974	if (tb[TCA_FLOWER_KEY_MPLS_LABEL]) {
 975		u32 label = nla_get_u32(tb[TCA_FLOWER_KEY_MPLS_LABEL]);
 976
 977		if (label & ~MPLS_LABEL_MASK) {
 978			NL_SET_ERR_MSG_ATTR(extack,
 979					    tb[TCA_FLOWER_KEY_MPLS_LABEL],
 980					    "Label must be between 0 and 1048575");
 981			return -EINVAL;
 982		}
 983		lse_val->mpls_label = label;
 984		lse_mask->mpls_label = MPLS_LABEL_MASK;
 985		dissector_set_mpls_lse(key_val, 0);
 986		dissector_set_mpls_lse(key_mask, 0);
 987	}
 988	return 0;
 989}
 990
 991static void fl_set_key_vlan(struct nlattr **tb,
 992			    __be16 ethertype,
 993			    int vlan_id_key, int vlan_prio_key,
 994			    struct flow_dissector_key_vlan *key_val,
 995			    struct flow_dissector_key_vlan *key_mask)
 996{
 997#define VLAN_PRIORITY_MASK	0x7
 998
 999	if (tb[vlan_id_key]) {
1000		key_val->vlan_id =
1001			nla_get_u16(tb[vlan_id_key]) & VLAN_VID_MASK;
1002		key_mask->vlan_id = VLAN_VID_MASK;
1003	}
1004	if (tb[vlan_prio_key]) {
1005		key_val->vlan_priority =
1006			nla_get_u8(tb[vlan_prio_key]) &
1007			VLAN_PRIORITY_MASK;
1008		key_mask->vlan_priority = VLAN_PRIORITY_MASK;
1009	}
1010	key_val->vlan_tpid = ethertype;
1011	key_mask->vlan_tpid = cpu_to_be16(~0);
1012}
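/* The TPID (the EtherType that introduced this tag, e.g. 0x8100 or 0x88a8)
 * is always matched exactly; VLAN ID and priority are matched only when the
 * corresponding attributes were supplied.
 */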
1013
1014static void fl_set_key_flag(u32 flower_key, u32 flower_mask,
1015			    u32 *dissector_key, u32 *dissector_mask,
1016			    u32 flower_flag_bit, u32 dissector_flag_bit)
1017{
1018	if (flower_mask & flower_flag_bit) {
1019		*dissector_mask |= dissector_flag_bit;
1020		if (flower_key & flower_flag_bit)
1021			*dissector_key |= dissector_flag_bit;
1022	}
1023}
1024
1025static int fl_set_key_flags(struct nlattr **tb, u32 *flags_key,
1026			    u32 *flags_mask, struct netlink_ext_ack *extack)
1027{
1028	u32 key, mask;
1029
1030	/* mask is mandatory for flags */
1031	if (!tb[TCA_FLOWER_KEY_FLAGS_MASK]) {
1032		NL_SET_ERR_MSG(extack, "Missing flags mask");
1033		return -EINVAL;
1034	}
1035
1036	key = be32_to_cpu(nla_get_u32(tb[TCA_FLOWER_KEY_FLAGS]));
1037	mask = be32_to_cpu(nla_get_u32(tb[TCA_FLOWER_KEY_FLAGS_MASK]));
1038
1039	*flags_key  = 0;
1040	*flags_mask = 0;
1041
1042	fl_set_key_flag(key, mask, flags_key, flags_mask,
1043			TCA_FLOWER_KEY_FLAGS_IS_FRAGMENT, FLOW_DIS_IS_FRAGMENT);
1044	fl_set_key_flag(key, mask, flags_key, flags_mask,
1045			TCA_FLOWER_KEY_FLAGS_FRAG_IS_FIRST,
1046			FLOW_DIS_FIRST_FRAG);
1047
1048	return 0;
1049}
1050
1051static void fl_set_key_ip(struct nlattr **tb, bool encap,
1052			  struct flow_dissector_key_ip *key,
1053			  struct flow_dissector_key_ip *mask)
1054{
1055	int tos_key = encap ? TCA_FLOWER_KEY_ENC_IP_TOS : TCA_FLOWER_KEY_IP_TOS;
1056	int ttl_key = encap ? TCA_FLOWER_KEY_ENC_IP_TTL : TCA_FLOWER_KEY_IP_TTL;
1057	int tos_mask = encap ? TCA_FLOWER_KEY_ENC_IP_TOS_MASK : TCA_FLOWER_KEY_IP_TOS_MASK;
1058	int ttl_mask = encap ? TCA_FLOWER_KEY_ENC_IP_TTL_MASK : TCA_FLOWER_KEY_IP_TTL_MASK;
1059
1060	fl_set_key_val(tb, &key->tos, tos_key, &mask->tos, tos_mask, sizeof(key->tos));
1061	fl_set_key_val(tb, &key->ttl, ttl_key, &mask->ttl, ttl_mask, sizeof(key->ttl));
1062}
1063
1064static int fl_set_geneve_opt(const struct nlattr *nla, struct fl_flow_key *key,
1065			     int depth, int option_len,
1066			     struct netlink_ext_ack *extack)
1067{
1068	struct nlattr *tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_MAX + 1];
1069	struct nlattr *class = NULL, *type = NULL, *data = NULL;
1070	struct geneve_opt *opt;
1071	int err, data_len = 0;
1072
1073	if (option_len > sizeof(struct geneve_opt))
1074		data_len = option_len - sizeof(struct geneve_opt);
1075
1076	opt = (struct geneve_opt *)&key->enc_opts.data[key->enc_opts.len];
1077	memset(opt, 0xff, option_len);
1078	opt->length = data_len / 4;
1079	opt->r1 = 0;
1080	opt->r2 = 0;
1081	opt->r3 = 0;
1082
 1083	/* If no mask has been provided we assume an exact match. */
1084	if (!depth)
1085		return sizeof(struct geneve_opt) + data_len;
1086
1087	if (nla_type(nla) != TCA_FLOWER_KEY_ENC_OPTS_GENEVE) {
1088		NL_SET_ERR_MSG(extack, "Non-geneve option type for mask");
1089		return -EINVAL;
1090	}
1091
1092	err = nla_parse_nested_deprecated(tb,
1093					  TCA_FLOWER_KEY_ENC_OPT_GENEVE_MAX,
1094					  nla, geneve_opt_policy, extack);
1095	if (err < 0)
1096		return err;
1097
1098	/* We are not allowed to omit any of CLASS, TYPE or DATA
1099	 * fields from the key.
1100	 */
1101	if (!option_len &&
1102	    (!tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_CLASS] ||
1103	     !tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_TYPE] ||
1104	     !tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_DATA])) {
1105		NL_SET_ERR_MSG(extack, "Missing tunnel key geneve option class, type or data");
1106		return -EINVAL;
1107	}
1108
1109	/* Omitting any of CLASS, TYPE or DATA fields is allowed
1110	 * for the mask.
1111	 */
1112	if (tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_DATA]) {
1113		int new_len = key->enc_opts.len;
1114
1115		data = tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_DATA];
1116		data_len = nla_len(data);
1117		if (data_len < 4) {
1118			NL_SET_ERR_MSG(extack, "Tunnel key geneve option data is less than 4 bytes long");
1119			return -ERANGE;
1120		}
1121		if (data_len % 4) {
 1122			NL_SET_ERR_MSG(extack, "Tunnel key geneve option data is not a multiple of 4 bytes");
1123			return -ERANGE;
1124		}
1125
1126		new_len += sizeof(struct geneve_opt) + data_len;
1127		BUILD_BUG_ON(FLOW_DIS_TUN_OPTS_MAX != IP_TUNNEL_OPTS_MAX);
1128		if (new_len > FLOW_DIS_TUN_OPTS_MAX) {
 1129			NL_SET_ERR_MSG(extack, "Tunnel options exceed max size");
1130			return -ERANGE;
1131		}
1132		opt->length = data_len / 4;
1133		memcpy(opt->opt_data, nla_data(data), data_len);
1134	}
1135
1136	if (tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_CLASS]) {
1137		class = tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_CLASS];
1138		opt->opt_class = nla_get_be16(class);
1139	}
1140
1141	if (tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_TYPE]) {
1142		type = tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_TYPE];
1143		opt->type = nla_get_u8(type);
1144	}
1145
1146	return sizeof(struct geneve_opt) + data_len;
1147}
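/* Each enc_opts helper runs twice per option: once over the key attribute
 * (depth = the key's nesting length) and once over the mask attribute. A
 * zero depth means no mask was supplied, and the pre-filled all-ones option
 * is kept as an exact match.
 */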
1148
1149static int fl_set_vxlan_opt(const struct nlattr *nla, struct fl_flow_key *key,
1150			    int depth, int option_len,
1151			    struct netlink_ext_ack *extack)
1152{
1153	struct nlattr *tb[TCA_FLOWER_KEY_ENC_OPT_VXLAN_MAX + 1];
1154	struct vxlan_metadata *md;
1155	int err;
1156
1157	md = (struct vxlan_metadata *)&key->enc_opts.data[key->enc_opts.len];
1158	memset(md, 0xff, sizeof(*md));
1159
1160	if (!depth)
1161		return sizeof(*md);
1162
1163	if (nla_type(nla) != TCA_FLOWER_KEY_ENC_OPTS_VXLAN) {
1164		NL_SET_ERR_MSG(extack, "Non-vxlan option type for mask");
1165		return -EINVAL;
1166	}
1167
1168	err = nla_parse_nested(tb, TCA_FLOWER_KEY_ENC_OPT_VXLAN_MAX, nla,
1169			       vxlan_opt_policy, extack);
1170	if (err < 0)
1171		return err;
1172
1173	if (!option_len && !tb[TCA_FLOWER_KEY_ENC_OPT_VXLAN_GBP]) {
1174		NL_SET_ERR_MSG(extack, "Missing tunnel key vxlan option gbp");
1175		return -EINVAL;
1176	}
1177
1178	if (tb[TCA_FLOWER_KEY_ENC_OPT_VXLAN_GBP]) {
1179		md->gbp = nla_get_u32(tb[TCA_FLOWER_KEY_ENC_OPT_VXLAN_GBP]);
1180		md->gbp &= VXLAN_GBP_MASK;
1181	}
1182
1183	return sizeof(*md);
1184}
1185
1186static int fl_set_erspan_opt(const struct nlattr *nla, struct fl_flow_key *key,
1187			     int depth, int option_len,
1188			     struct netlink_ext_ack *extack)
1189{
1190	struct nlattr *tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_MAX + 1];
1191	struct erspan_metadata *md;
1192	int err;
1193
1194	md = (struct erspan_metadata *)&key->enc_opts.data[key->enc_opts.len];
1195	memset(md, 0xff, sizeof(*md));
1196	md->version = 1;
1197
1198	if (!depth)
1199		return sizeof(*md);
1200
1201	if (nla_type(nla) != TCA_FLOWER_KEY_ENC_OPTS_ERSPAN) {
1202		NL_SET_ERR_MSG(extack, "Non-erspan option type for mask");
1203		return -EINVAL;
1204	}
1205
1206	err = nla_parse_nested(tb, TCA_FLOWER_KEY_ENC_OPT_ERSPAN_MAX, nla,
1207			       erspan_opt_policy, extack);
1208	if (err < 0)
1209		return err;
1210
1211	if (!option_len && !tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_VER]) {
1212		NL_SET_ERR_MSG(extack, "Missing tunnel key erspan option ver");
1213		return -EINVAL;
1214	}
1215
1216	if (tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_VER])
1217		md->version = nla_get_u8(tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_VER]);
1218
1219	if (md->version == 1) {
1220		if (!option_len && !tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_INDEX]) {
1221			NL_SET_ERR_MSG(extack, "Missing tunnel key erspan option index");
1222			return -EINVAL;
1223		}
1224		if (tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_INDEX]) {
1225			nla = tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_INDEX];
1226			memset(&md->u, 0x00, sizeof(md->u));
1227			md->u.index = nla_get_be32(nla);
1228		}
1229	} else if (md->version == 2) {
1230		if (!option_len && (!tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_DIR] ||
1231				    !tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_HWID])) {
1232			NL_SET_ERR_MSG(extack, "Missing tunnel key erspan option dir or hwid");
1233			return -EINVAL;
1234		}
1235		if (tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_DIR]) {
1236			nla = tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_DIR];
1237			md->u.md2.dir = nla_get_u8(nla);
1238		}
1239		if (tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_HWID]) {
1240			nla = tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_HWID];
1241			set_hwid(&md->u.md2, nla_get_u8(nla));
1242		}
1243	} else {
1244		NL_SET_ERR_MSG(extack, "Tunnel key erspan option ver is incorrect");
1245		return -EINVAL;
1246	}
1247
1248	return sizeof(*md);
1249}
1250
1251static int fl_set_enc_opt(struct nlattr **tb, struct fl_flow_key *key,
1252			  struct fl_flow_key *mask,
1253			  struct netlink_ext_ack *extack)
1254{
1255	const struct nlattr *nla_enc_key, *nla_opt_key, *nla_opt_msk = NULL;
1256	int err, option_len, key_depth, msk_depth = 0;
1257
1258	err = nla_validate_nested_deprecated(tb[TCA_FLOWER_KEY_ENC_OPTS],
1259					     TCA_FLOWER_KEY_ENC_OPTS_MAX,
1260					     enc_opts_policy, extack);
1261	if (err)
1262		return err;
1263
1264	nla_enc_key = nla_data(tb[TCA_FLOWER_KEY_ENC_OPTS]);
1265
1266	if (tb[TCA_FLOWER_KEY_ENC_OPTS_MASK]) {
1267		err = nla_validate_nested_deprecated(tb[TCA_FLOWER_KEY_ENC_OPTS_MASK],
1268						     TCA_FLOWER_KEY_ENC_OPTS_MAX,
1269						     enc_opts_policy, extack);
1270		if (err)
1271			return err;
1272
1273		nla_opt_msk = nla_data(tb[TCA_FLOWER_KEY_ENC_OPTS_MASK]);
1274		msk_depth = nla_len(tb[TCA_FLOWER_KEY_ENC_OPTS_MASK]);
1275	}
1276
1277	nla_for_each_attr(nla_opt_key, nla_enc_key,
1278			  nla_len(tb[TCA_FLOWER_KEY_ENC_OPTS]), key_depth) {
1279		switch (nla_type(nla_opt_key)) {
1280		case TCA_FLOWER_KEY_ENC_OPTS_GENEVE:
1281			if (key->enc_opts.dst_opt_type &&
1282			    key->enc_opts.dst_opt_type != TUNNEL_GENEVE_OPT) {
1283				NL_SET_ERR_MSG(extack, "Duplicate type for geneve options");
1284				return -EINVAL;
1285			}
1286			option_len = 0;
1287			key->enc_opts.dst_opt_type = TUNNEL_GENEVE_OPT;
1288			option_len = fl_set_geneve_opt(nla_opt_key, key,
1289						       key_depth, option_len,
1290						       extack);
1291			if (option_len < 0)
1292				return option_len;
1293
1294			key->enc_opts.len += option_len;
1295			/* At the same time we need to parse through the mask
1296			 * in order to verify exact and mask attribute lengths.
1297			 */
1298			mask->enc_opts.dst_opt_type = TUNNEL_GENEVE_OPT;
1299			option_len = fl_set_geneve_opt(nla_opt_msk, mask,
1300						       msk_depth, option_len,
1301						       extack);
1302			if (option_len < 0)
1303				return option_len;
1304
1305			mask->enc_opts.len += option_len;
1306			if (key->enc_opts.len != mask->enc_opts.len) {
 1307				NL_SET_ERR_MSG(extack, "Key and mask are misaligned");
1308				return -EINVAL;
1309			}
1310
1311			if (msk_depth)
1312				nla_opt_msk = nla_next(nla_opt_msk, &msk_depth);
1313			break;
1314		case TCA_FLOWER_KEY_ENC_OPTS_VXLAN:
1315			if (key->enc_opts.dst_opt_type) {
1316				NL_SET_ERR_MSG(extack, "Duplicate type for vxlan options");
1317				return -EINVAL;
1318			}
1319			option_len = 0;
1320			key->enc_opts.dst_opt_type = TUNNEL_VXLAN_OPT;
1321			option_len = fl_set_vxlan_opt(nla_opt_key, key,
1322						      key_depth, option_len,
1323						      extack);
1324			if (option_len < 0)
1325				return option_len;
1326
1327			key->enc_opts.len += option_len;
1328			/* At the same time we need to parse through the mask
1329			 * in order to verify exact and mask attribute lengths.
1330			 */
1331			mask->enc_opts.dst_opt_type = TUNNEL_VXLAN_OPT;
1332			option_len = fl_set_vxlan_opt(nla_opt_msk, mask,
1333						      msk_depth, option_len,
1334						      extack);
1335			if (option_len < 0)
1336				return option_len;
1337
1338			mask->enc_opts.len += option_len;
1339			if (key->enc_opts.len != mask->enc_opts.len) {
 1340				NL_SET_ERR_MSG(extack, "Key and mask are misaligned");
1341				return -EINVAL;
1342			}
1343
1344			if (msk_depth)
1345				nla_opt_msk = nla_next(nla_opt_msk, &msk_depth);
1346			break;
1347		case TCA_FLOWER_KEY_ENC_OPTS_ERSPAN:
1348			if (key->enc_opts.dst_opt_type) {
1349				NL_SET_ERR_MSG(extack, "Duplicate type for erspan options");
1350				return -EINVAL;
1351			}
1352			option_len = 0;
1353			key->enc_opts.dst_opt_type = TUNNEL_ERSPAN_OPT;
1354			option_len = fl_set_erspan_opt(nla_opt_key, key,
1355						       key_depth, option_len,
1356						       extack);
1357			if (option_len < 0)
1358				return option_len;
1359
1360			key->enc_opts.len += option_len;
1361			/* At the same time we need to parse through the mask
1362			 * in order to verify exact and mask attribute lengths.
1363			 */
1364			mask->enc_opts.dst_opt_type = TUNNEL_ERSPAN_OPT;
1365			option_len = fl_set_erspan_opt(nla_opt_msk, mask,
1366						       msk_depth, option_len,
1367						       extack);
1368			if (option_len < 0)
1369				return option_len;
1370
1371			mask->enc_opts.len += option_len;
1372			if (key->enc_opts.len != mask->enc_opts.len) {
 1373				NL_SET_ERR_MSG(extack, "Key and mask are misaligned");
1374				return -EINVAL;
1375			}
1376
1377			if (msk_depth)
1378				nla_opt_msk = nla_next(nla_opt_msk, &msk_depth);
1379			break;
1380		default:
1381			NL_SET_ERR_MSG(extack, "Unknown tunnel option type");
1382			return -EINVAL;
1383		}
1384	}
1385
1386	return 0;
1387}
1388
1389static int fl_set_key_ct(struct nlattr **tb,
1390			 struct flow_dissector_key_ct *key,
1391			 struct flow_dissector_key_ct *mask,
1392			 struct netlink_ext_ack *extack)
1393{
1394	if (tb[TCA_FLOWER_KEY_CT_STATE]) {
1395		if (!IS_ENABLED(CONFIG_NF_CONNTRACK)) {
1396			NL_SET_ERR_MSG(extack, "Conntrack isn't enabled");
1397			return -EOPNOTSUPP;
1398		}
1399		fl_set_key_val(tb, &key->ct_state, TCA_FLOWER_KEY_CT_STATE,
1400			       &mask->ct_state, TCA_FLOWER_KEY_CT_STATE_MASK,
1401			       sizeof(key->ct_state));
1402	}
1403	if (tb[TCA_FLOWER_KEY_CT_ZONE]) {
1404		if (!IS_ENABLED(CONFIG_NF_CONNTRACK_ZONES)) {
 1405			NL_SET_ERR_MSG(extack, "Conntrack zones aren't enabled");
1406			return -EOPNOTSUPP;
1407		}
1408		fl_set_key_val(tb, &key->ct_zone, TCA_FLOWER_KEY_CT_ZONE,
1409			       &mask->ct_zone, TCA_FLOWER_KEY_CT_ZONE_MASK,
1410			       sizeof(key->ct_zone));
1411	}
1412	if (tb[TCA_FLOWER_KEY_CT_MARK]) {
1413		if (!IS_ENABLED(CONFIG_NF_CONNTRACK_MARK)) {
1414			NL_SET_ERR_MSG(extack, "Conntrack mark isn't enabled");
1415			return -EOPNOTSUPP;
1416		}
1417		fl_set_key_val(tb, &key->ct_mark, TCA_FLOWER_KEY_CT_MARK,
1418			       &mask->ct_mark, TCA_FLOWER_KEY_CT_MARK_MASK,
1419			       sizeof(key->ct_mark));
1420	}
1421	if (tb[TCA_FLOWER_KEY_CT_LABELS]) {
1422		if (!IS_ENABLED(CONFIG_NF_CONNTRACK_LABELS)) {
1423			NL_SET_ERR_MSG(extack, "Conntrack labels aren't enabled");
1424			return -EOPNOTSUPP;
1425		}
1426		fl_set_key_val(tb, key->ct_labels, TCA_FLOWER_KEY_CT_LABELS,
1427			       mask->ct_labels, TCA_FLOWER_KEY_CT_LABELS_MASK,
1428			       sizeof(key->ct_labels));
1429	}
1430
1431	return 0;
1432}
1433
1434static int fl_set_key(struct net *net, struct nlattr **tb,
1435		      struct fl_flow_key *key, struct fl_flow_key *mask,
1436		      struct netlink_ext_ack *extack)
1437{
1438	__be16 ethertype;
1439	int ret = 0;
1440
1441	if (tb[TCA_FLOWER_INDEV]) {
1442		int err = tcf_change_indev(net, tb[TCA_FLOWER_INDEV], extack);
1443		if (err < 0)
1444			return err;
1445		key->meta.ingress_ifindex = err;
1446		mask->meta.ingress_ifindex = 0xffffffff;
1447	}
1448
1449	fl_set_key_val(tb, key->eth.dst, TCA_FLOWER_KEY_ETH_DST,
1450		       mask->eth.dst, TCA_FLOWER_KEY_ETH_DST_MASK,
1451		       sizeof(key->eth.dst));
1452	fl_set_key_val(tb, key->eth.src, TCA_FLOWER_KEY_ETH_SRC,
1453		       mask->eth.src, TCA_FLOWER_KEY_ETH_SRC_MASK,
1454		       sizeof(key->eth.src));
1455
1456	if (tb[TCA_FLOWER_KEY_ETH_TYPE]) {
1457		ethertype = nla_get_be16(tb[TCA_FLOWER_KEY_ETH_TYPE]);
1458
1459		if (eth_type_vlan(ethertype)) {
1460			fl_set_key_vlan(tb, ethertype, TCA_FLOWER_KEY_VLAN_ID,
1461					TCA_FLOWER_KEY_VLAN_PRIO, &key->vlan,
1462					&mask->vlan);
1463
1464			if (tb[TCA_FLOWER_KEY_VLAN_ETH_TYPE]) {
1465				ethertype = nla_get_be16(tb[TCA_FLOWER_KEY_VLAN_ETH_TYPE]);
1466				if (eth_type_vlan(ethertype)) {
1467					fl_set_key_vlan(tb, ethertype,
1468							TCA_FLOWER_KEY_CVLAN_ID,
1469							TCA_FLOWER_KEY_CVLAN_PRIO,
1470							&key->cvlan, &mask->cvlan);
1471					fl_set_key_val(tb, &key->basic.n_proto,
1472						       TCA_FLOWER_KEY_CVLAN_ETH_TYPE,
1473						       &mask->basic.n_proto,
1474						       TCA_FLOWER_UNSPEC,
1475						       sizeof(key->basic.n_proto));
1476				} else {
1477					key->basic.n_proto = ethertype;
1478					mask->basic.n_proto = cpu_to_be16(~0);
1479				}
1480			}
1481		} else {
1482			key->basic.n_proto = ethertype;
1483			mask->basic.n_proto = cpu_to_be16(~0);
1484		}
1485	}
1486
1487	if (key->basic.n_proto == htons(ETH_P_IP) ||
1488	    key->basic.n_proto == htons(ETH_P_IPV6)) {
1489		fl_set_key_val(tb, &key->basic.ip_proto, TCA_FLOWER_KEY_IP_PROTO,
1490			       &mask->basic.ip_proto, TCA_FLOWER_UNSPEC,
1491			       sizeof(key->basic.ip_proto));
1492		fl_set_key_ip(tb, false, &key->ip, &mask->ip);
1493	}
1494
1495	if (tb[TCA_FLOWER_KEY_IPV4_SRC] || tb[TCA_FLOWER_KEY_IPV4_DST]) {
1496		key->control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
1497		mask->control.addr_type = ~0;
1498		fl_set_key_val(tb, &key->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC,
1499			       &mask->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC_MASK,
1500			       sizeof(key->ipv4.src));
1501		fl_set_key_val(tb, &key->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST,
1502			       &mask->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST_MASK,
1503			       sizeof(key->ipv4.dst));
1504	} else if (tb[TCA_FLOWER_KEY_IPV6_SRC] || tb[TCA_FLOWER_KEY_IPV6_DST]) {
1505		key->control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
1506		mask->control.addr_type = ~0;
1507		fl_set_key_val(tb, &key->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC,
1508			       &mask->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC_MASK,
1509			       sizeof(key->ipv6.src));
1510		fl_set_key_val(tb, &key->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST,
1511			       &mask->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST_MASK,
1512			       sizeof(key->ipv6.dst));
1513	}
1514
1515	if (key->basic.ip_proto == IPPROTO_TCP) {
1516		fl_set_key_val(tb, &key->tp.src, TCA_FLOWER_KEY_TCP_SRC,
1517			       &mask->tp.src, TCA_FLOWER_KEY_TCP_SRC_MASK,
1518			       sizeof(key->tp.src));
1519		fl_set_key_val(tb, &key->tp.dst, TCA_FLOWER_KEY_TCP_DST,
1520			       &mask->tp.dst, TCA_FLOWER_KEY_TCP_DST_MASK,
1521			       sizeof(key->tp.dst));
1522		fl_set_key_val(tb, &key->tcp.flags, TCA_FLOWER_KEY_TCP_FLAGS,
1523			       &mask->tcp.flags, TCA_FLOWER_KEY_TCP_FLAGS_MASK,
1524			       sizeof(key->tcp.flags));
1525	} else if (key->basic.ip_proto == IPPROTO_UDP) {
1526		fl_set_key_val(tb, &key->tp.src, TCA_FLOWER_KEY_UDP_SRC,
1527			       &mask->tp.src, TCA_FLOWER_KEY_UDP_SRC_MASK,
1528			       sizeof(key->tp.src));
1529		fl_set_key_val(tb, &key->tp.dst, TCA_FLOWER_KEY_UDP_DST,
1530			       &mask->tp.dst, TCA_FLOWER_KEY_UDP_DST_MASK,
1531			       sizeof(key->tp.dst));
1532	} else if (key->basic.ip_proto == IPPROTO_SCTP) {
1533		fl_set_key_val(tb, &key->tp.src, TCA_FLOWER_KEY_SCTP_SRC,
1534			       &mask->tp.src, TCA_FLOWER_KEY_SCTP_SRC_MASK,
1535			       sizeof(key->tp.src));
1536		fl_set_key_val(tb, &key->tp.dst, TCA_FLOWER_KEY_SCTP_DST,
1537			       &mask->tp.dst, TCA_FLOWER_KEY_SCTP_DST_MASK,
1538			       sizeof(key->tp.dst));
1539	} else if (key->basic.n_proto == htons(ETH_P_IP) &&
1540		   key->basic.ip_proto == IPPROTO_ICMP) {
1541		fl_set_key_val(tb, &key->icmp.type, TCA_FLOWER_KEY_ICMPV4_TYPE,
1542			       &mask->icmp.type,
1543			       TCA_FLOWER_KEY_ICMPV4_TYPE_MASK,
1544			       sizeof(key->icmp.type));
1545		fl_set_key_val(tb, &key->icmp.code, TCA_FLOWER_KEY_ICMPV4_CODE,
1546			       &mask->icmp.code,
1547			       TCA_FLOWER_KEY_ICMPV4_CODE_MASK,
1548			       sizeof(key->icmp.code));
1549	} else if (key->basic.n_proto == htons(ETH_P_IPV6) &&
1550		   key->basic.ip_proto == IPPROTO_ICMPV6) {
1551		fl_set_key_val(tb, &key->icmp.type, TCA_FLOWER_KEY_ICMPV6_TYPE,
1552			       &mask->icmp.type,
1553			       TCA_FLOWER_KEY_ICMPV6_TYPE_MASK,
1554			       sizeof(key->icmp.type));
1555		fl_set_key_val(tb, &key->icmp.code, TCA_FLOWER_KEY_ICMPV6_CODE,
1556			       &mask->icmp.code,
1557			       TCA_FLOWER_KEY_ICMPV6_CODE_MASK,
1558			       sizeof(key->icmp.code));
1559	} else if (key->basic.n_proto == htons(ETH_P_MPLS_UC) ||
1560		   key->basic.n_proto == htons(ETH_P_MPLS_MC)) {
1561		ret = fl_set_key_mpls(tb, &key->mpls, &mask->mpls, extack);
1562		if (ret)
1563			return ret;
1564	} else if (key->basic.n_proto == htons(ETH_P_ARP) ||
1565		   key->basic.n_proto == htons(ETH_P_RARP)) {
1566		fl_set_key_val(tb, &key->arp.sip, TCA_FLOWER_KEY_ARP_SIP,
1567			       &mask->arp.sip, TCA_FLOWER_KEY_ARP_SIP_MASK,
1568			       sizeof(key->arp.sip));
1569		fl_set_key_val(tb, &key->arp.tip, TCA_FLOWER_KEY_ARP_TIP,
1570			       &mask->arp.tip, TCA_FLOWER_KEY_ARP_TIP_MASK,
1571			       sizeof(key->arp.tip));
1572		fl_set_key_val(tb, &key->arp.op, TCA_FLOWER_KEY_ARP_OP,
1573			       &mask->arp.op, TCA_FLOWER_KEY_ARP_OP_MASK,
1574			       sizeof(key->arp.op));
1575		fl_set_key_val(tb, key->arp.sha, TCA_FLOWER_KEY_ARP_SHA,
1576			       mask->arp.sha, TCA_FLOWER_KEY_ARP_SHA_MASK,
1577			       sizeof(key->arp.sha));
1578		fl_set_key_val(tb, key->arp.tha, TCA_FLOWER_KEY_ARP_THA,
1579			       mask->arp.tha, TCA_FLOWER_KEY_ARP_THA_MASK,
1580			       sizeof(key->arp.tha));
1581	}
1582
1583	if (key->basic.ip_proto == IPPROTO_TCP ||
1584	    key->basic.ip_proto == IPPROTO_UDP ||
1585	    key->basic.ip_proto == IPPROTO_SCTP) {
1586		ret = fl_set_key_port_range(tb, key, mask, extack);
1587		if (ret)
1588			return ret;
1589	}
1590
1591	if (tb[TCA_FLOWER_KEY_ENC_IPV4_SRC] ||
1592	    tb[TCA_FLOWER_KEY_ENC_IPV4_DST]) {
1593		key->enc_control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
1594		mask->enc_control.addr_type = ~0;
1595		fl_set_key_val(tb, &key->enc_ipv4.src,
1596			       TCA_FLOWER_KEY_ENC_IPV4_SRC,
1597			       &mask->enc_ipv4.src,
1598			       TCA_FLOWER_KEY_ENC_IPV4_SRC_MASK,
1599			       sizeof(key->enc_ipv4.src));
1600		fl_set_key_val(tb, &key->enc_ipv4.dst,
1601			       TCA_FLOWER_KEY_ENC_IPV4_DST,
1602			       &mask->enc_ipv4.dst,
1603			       TCA_FLOWER_KEY_ENC_IPV4_DST_MASK,
1604			       sizeof(key->enc_ipv4.dst));
1605	}
1606
1607	if (tb[TCA_FLOWER_KEY_ENC_IPV6_SRC] ||
1608	    tb[TCA_FLOWER_KEY_ENC_IPV6_DST]) {
1609		key->enc_control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
1610		mask->enc_control.addr_type = ~0;
1611		fl_set_key_val(tb, &key->enc_ipv6.src,
1612			       TCA_FLOWER_KEY_ENC_IPV6_SRC,
1613			       &mask->enc_ipv6.src,
1614			       TCA_FLOWER_KEY_ENC_IPV6_SRC_MASK,
1615			       sizeof(key->enc_ipv6.src));
1616		fl_set_key_val(tb, &key->enc_ipv6.dst,
1617			       TCA_FLOWER_KEY_ENC_IPV6_DST,
1618			       &mask->enc_ipv6.dst,
1619			       TCA_FLOWER_KEY_ENC_IPV6_DST_MASK,
1620			       sizeof(key->enc_ipv6.dst));
1621	}
1622
1623	fl_set_key_val(tb, &key->enc_key_id.keyid, TCA_FLOWER_KEY_ENC_KEY_ID,
1624		       &mask->enc_key_id.keyid, TCA_FLOWER_UNSPEC,
1625		       sizeof(key->enc_key_id.keyid));
1626
1627	fl_set_key_val(tb, &key->enc_tp.src, TCA_FLOWER_KEY_ENC_UDP_SRC_PORT,
1628		       &mask->enc_tp.src, TCA_FLOWER_KEY_ENC_UDP_SRC_PORT_MASK,
1629		       sizeof(key->enc_tp.src));
1630
1631	fl_set_key_val(tb, &key->enc_tp.dst, TCA_FLOWER_KEY_ENC_UDP_DST_PORT,
1632		       &mask->enc_tp.dst, TCA_FLOWER_KEY_ENC_UDP_DST_PORT_MASK,
1633		       sizeof(key->enc_tp.dst));
1634
1635	fl_set_key_ip(tb, true, &key->enc_ip, &mask->enc_ip);
1636
1637	fl_set_key_val(tb, &key->hash.hash, TCA_FLOWER_KEY_HASH,
1638		       &mask->hash.hash, TCA_FLOWER_KEY_HASH_MASK,
1639		       sizeof(key->hash.hash));
1640
1641	if (tb[TCA_FLOWER_KEY_ENC_OPTS]) {
1642		ret = fl_set_enc_opt(tb, key, mask, extack);
1643		if (ret)
1644			return ret;
1645	}
1646
1647	ret = fl_set_key_ct(tb, &key->ct, &mask->ct, extack);
1648	if (ret)
1649		return ret;
1650
1651	if (tb[TCA_FLOWER_KEY_FLAGS])
1652		ret = fl_set_key_flags(tb, &key->control.flags,
1653				       &mask->control.flags, extack);
1654
1655	return ret;
1656}
1657
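/* Copy only the byte range that src's mask actually covers; dst is
 * allocated with kzalloc() by the caller, so bytes outside the copied
 * range remain zero.
 */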
1658static void fl_mask_copy(struct fl_flow_mask *dst,
1659			 struct fl_flow_mask *src)
1660{
1661	const void *psrc = fl_key_get_start(&src->key, src);
1662	void *pdst = fl_key_get_start(&dst->key, src);
1663
1664	memcpy(pdst, psrc, fl_mask_range(src));
1665	dst->range = src->range;
1666}
1667
1668static const struct rhashtable_params fl_ht_params = {
1669	.key_offset = offsetof(struct cls_fl_filter, mkey), /* base offset */
1670	.head_offset = offsetof(struct cls_fl_filter, ht_node),
1671	.automatic_shrinking = true,
1672};
1673
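/* Each mask gets its own filter hashtable. Its key is only the non-zero
 * span of the masked flow key: key_len is the mask's range and key_offset
 * is shifted to the start of that range, so hashing and comparison touch
 * the minimum number of bytes.
 */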
1674static int fl_init_mask_hashtable(struct fl_flow_mask *mask)
1675{
1676	mask->filter_ht_params = fl_ht_params;
1677	mask->filter_ht_params.key_len = fl_mask_range(mask);
1678	mask->filter_ht_params.key_offset += mask->range.start;
1679
1680	return rhashtable_init(&mask->ht, &mask->filter_ht_params);
1681}
1682
1683#define FL_KEY_MEMBER_OFFSET(member) offsetof(struct fl_flow_key, member)
1684#define FL_KEY_MEMBER_SIZE(member) sizeof_field(struct fl_flow_key, member)
1685
1686#define FL_KEY_IS_MASKED(mask, member)						\
1687	memchr_inv(((char *)mask) + FL_KEY_MEMBER_OFFSET(member),		\
1688		   0, FL_KEY_MEMBER_SIZE(member))
1689
1690#define FL_KEY_SET(keys, cnt, id, member)					\
1691	do {									\
1692		keys[cnt].key_id = id;						\
1693		keys[cnt].offset = FL_KEY_MEMBER_OFFSET(member);		\
1694		cnt++;								\
1695	} while (0)
1696
1697#define FL_KEY_SET_IF_MASKED(mask, keys, cnt, id, member)			\
1698	do {									\
1699		if (FL_KEY_IS_MASKED(mask, member))				\
1700			FL_KEY_SET(keys, cnt, id, member);			\
1701	} while (0)
1702
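/* Build the flow_dissector used at classification time. Only keys whose
 * mask contains at least one non-zero byte are registered; CONTROL and
 * BASIC are always included, and ENC_CONTROL is added whenever either
 * tunnel address key is masked.
 */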
1703static void fl_init_dissector(struct flow_dissector *dissector,
1704			      struct fl_flow_key *mask)
1705{
1706	struct flow_dissector_key keys[FLOW_DISSECTOR_KEY_MAX];
1707	size_t cnt = 0;
1708
1709	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1710			     FLOW_DISSECTOR_KEY_META, meta);
1711	FL_KEY_SET(keys, cnt, FLOW_DISSECTOR_KEY_CONTROL, control);
1712	FL_KEY_SET(keys, cnt, FLOW_DISSECTOR_KEY_BASIC, basic);
1713	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1714			     FLOW_DISSECTOR_KEY_ETH_ADDRS, eth);
1715	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1716			     FLOW_DISSECTOR_KEY_IPV4_ADDRS, ipv4);
1717	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1718			     FLOW_DISSECTOR_KEY_IPV6_ADDRS, ipv6);
1719	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1720			     FLOW_DISSECTOR_KEY_PORTS, tp);
1721	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1722			     FLOW_DISSECTOR_KEY_PORTS_RANGE, tp_range);
1723	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1724			     FLOW_DISSECTOR_KEY_IP, ip);
1725	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1726			     FLOW_DISSECTOR_KEY_TCP, tcp);
1727	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1728			     FLOW_DISSECTOR_KEY_ICMP, icmp);
1729	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1730			     FLOW_DISSECTOR_KEY_ARP, arp);
1731	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1732			     FLOW_DISSECTOR_KEY_MPLS, mpls);
1733	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1734			     FLOW_DISSECTOR_KEY_VLAN, vlan);
1735	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1736			     FLOW_DISSECTOR_KEY_CVLAN, cvlan);
1737	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1738			     FLOW_DISSECTOR_KEY_ENC_KEYID, enc_key_id);
1739	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1740			     FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS, enc_ipv4);
1741	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1742			     FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS, enc_ipv6);
1743	if (FL_KEY_IS_MASKED(mask, enc_ipv4) ||
1744	    FL_KEY_IS_MASKED(mask, enc_ipv6))
1745		FL_KEY_SET(keys, cnt, FLOW_DISSECTOR_KEY_ENC_CONTROL,
1746			   enc_control);
1747	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1748			     FLOW_DISSECTOR_KEY_ENC_PORTS, enc_tp);
1749	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1750			     FLOW_DISSECTOR_KEY_ENC_IP, enc_ip);
1751	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1752			     FLOW_DISSECTOR_KEY_ENC_OPTS, enc_opts);
1753	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1754			     FLOW_DISSECTOR_KEY_CT, ct);
1755	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1756			     FLOW_DISSECTOR_KEY_HASH, hash);
1757
1758	skb_flow_dissector_init(dissector, keys, cnt);
1759}
1760
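/* Turn the temporary mask node inserted by fl_check_assign_mask() into a
 * fully initialized mask: set up its filter hashtable and dissector, take
 * the first reference, and swap it into head->ht in place of the
 * temporary node.
 */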
1761static struct fl_flow_mask *fl_create_new_mask(struct cls_fl_head *head,
1762					       struct fl_flow_mask *mask)
1763{
1764	struct fl_flow_mask *newmask;
1765	int err;
1766
1767	newmask = kzalloc(sizeof(*newmask), GFP_KERNEL);
1768	if (!newmask)
1769		return ERR_PTR(-ENOMEM);
1770
1771	fl_mask_copy(newmask, mask);
1772
1773	if ((newmask->key.tp_range.tp_min.dst &&
1774	     newmask->key.tp_range.tp_max.dst) ||
1775	    (newmask->key.tp_range.tp_min.src &&
1776	     newmask->key.tp_range.tp_max.src))
1777		newmask->flags |= TCA_FLOWER_MASK_FLAGS_RANGE;
1778
1779	err = fl_init_mask_hashtable(newmask);
1780	if (err)
1781		goto errout_free;
1782
1783	fl_init_dissector(&newmask->dissector, &newmask->key);
1784
1785	INIT_LIST_HEAD_RCU(&newmask->filters);
1786
1787	refcount_set(&newmask->refcnt, 1);
1788	err = rhashtable_replace_fast(&head->ht, &mask->ht_node,
1789				      &newmask->ht_node, mask_ht_params);
1790	if (err)
1791		goto errout_destroy;
1792
1793	spin_lock(&head->masks_lock);
1794	list_add_tail_rcu(&newmask->list, &head->masks);
1795	spin_unlock(&head->masks_lock);
1796
1797	return newmask;
1798
1799errout_destroy:
1800	rhashtable_destroy(&newmask->ht);
1801errout_free:
1802	kfree(newmask);
1803
1804	return ERR_PTR(err);
1805}
1806
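/* Find a mask identical to the one required by the new filter and take a
 * reference to it, or create a new mask if none exists. When overwriting
 * an existing filter (fold), the mask is not allowed to change.
 */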
1807static int fl_check_assign_mask(struct cls_fl_head *head,
1808				struct cls_fl_filter *fnew,
1809				struct cls_fl_filter *fold,
1810				struct fl_flow_mask *mask)
1811{
1812	struct fl_flow_mask *newmask;
1813	int ret = 0;
1814
1815	rcu_read_lock();
1816
1817	/* Insert the mask as a temporary node to prevent concurrent creation
1818	 * of a mask with the same key. Any concurrent lookups with the same
1819	 * key will return -EAGAIN because the mask's refcnt is zero.
1820	 */
1821	fnew->mask = rhashtable_lookup_get_insert_fast(&head->ht,
1822						       &mask->ht_node,
1823						       mask_ht_params);
1824	if (!fnew->mask) {
1825		rcu_read_unlock();
1826
1827		if (fold) {
1828			ret = -EINVAL;
1829			goto errout_cleanup;
1830		}
1831
1832		newmask = fl_create_new_mask(head, mask);
1833		if (IS_ERR(newmask)) {
1834			ret = PTR_ERR(newmask);
1835			goto errout_cleanup;
1836		}
1837
1838		fnew->mask = newmask;
1839		return 0;
1840	} else if (IS_ERR(fnew->mask)) {
1841		ret = PTR_ERR(fnew->mask);
1842	} else if (fold && fold->mask != fnew->mask) {
1843		ret = -EINVAL;
1844	} else if (!refcount_inc_not_zero(&fnew->mask->refcnt)) {
1845		/* Mask was deleted concurrently, try again */
1846		ret = -EAGAIN;
1847	}
1848	rcu_read_unlock();
1849	return ret;
1850
1851errout_cleanup:
1852	rhashtable_remove_fast(&head->ht, &mask->ht_node,
1853			       mask_ht_params);
1854	return ret;
1855}
1856
1857static int fl_set_parms(struct net *net, struct tcf_proto *tp,
1858			struct cls_fl_filter *f, struct fl_flow_mask *mask,
1859			unsigned long base, struct nlattr **tb,
1860			struct nlattr *est, bool ovr,
1861			struct fl_flow_tmplt *tmplt, bool rtnl_held,
1862			struct netlink_ext_ack *extack)
1863{
1864	int err;
1865
1866	err = tcf_exts_validate(net, tp, tb, est, &f->exts, ovr, rtnl_held,
1867				extack);
1868	if (err < 0)
1869		return err;
1870
1871	if (tb[TCA_FLOWER_CLASSID]) {
1872		f->res.classid = nla_get_u32(tb[TCA_FLOWER_CLASSID]);
1873		if (!rtnl_held)
1874			rtnl_lock();
1875		tcf_bind_filter(tp, &f->res, base);
1876		if (!rtnl_held)
1877			rtnl_unlock();
1878	}
1879
1880	err = fl_set_key(net, tb, &f->key, &mask->key, extack);
1881	if (err)
1882		return err;
1883
1884	fl_mask_update_range(mask);
1885	fl_set_masked_key(&f->mkey, &f->key, mask);
1886
1887	if (!fl_mask_fits_tmplt(tmplt, mask)) {
1888		NL_SET_ERR_MSG_MOD(extack, "Mask does not fit the template");
1889		return -EINVAL;
1890	}
1891
1892	return 0;
1893}
1894
1895static int fl_ht_insert_unique(struct cls_fl_filter *fnew,
1896			       struct cls_fl_filter *fold,
1897			       bool *in_ht)
1898{
1899	struct fl_flow_mask *mask = fnew->mask;
1900	int err;
1901
1902	err = rhashtable_lookup_insert_fast(&mask->ht,
1903					    &fnew->ht_node,
1904					    mask->filter_ht_params);
1905	if (err) {
1906		*in_ht = false;
1907		/* It is okay if a filter with the same key already exists
1908		 * when overwriting.
1909		 */
1910		return fold && err == -EEXIST ? 0 : err;
1911	}
1912
1913	*in_ht = true;
1914	return 0;
1915}
1916
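/* Create or replace a filter. This runs without rtnl (see
 * TCF_PROTO_OPS_DOIT_UNLOCKED below): parsing, mask assignment, hashtable
 * insertion and hardware offload are all done first, and the final commit
 * that makes the filter visible happens under tp->lock. Concurrent
 * deletion of tp or of the old filter is handled by returning -EAGAIN so
 * the caller retries.
 */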
1917static int fl_change(struct net *net, struct sk_buff *in_skb,
1918		     struct tcf_proto *tp, unsigned long base,
1919		     u32 handle, struct nlattr **tca,
1920		     void **arg, bool ovr, bool rtnl_held,
1921		     struct netlink_ext_ack *extack)
1922{
1923	struct cls_fl_head *head = fl_head_dereference(tp);
1924	struct cls_fl_filter *fold = *arg;
1925	struct cls_fl_filter *fnew;
1926	struct fl_flow_mask *mask;
1927	struct nlattr **tb;
1928	bool in_ht;
1929	int err;
1930
1931	if (!tca[TCA_OPTIONS]) {
1932		err = -EINVAL;
1933		goto errout_fold;
1934	}
1935
1936	mask = kzalloc(sizeof(struct fl_flow_mask), GFP_KERNEL);
1937	if (!mask) {
1938		err = -ENOBUFS;
1939		goto errout_fold;
1940	}
1941
1942	tb = kcalloc(TCA_FLOWER_MAX + 1, sizeof(struct nlattr *), GFP_KERNEL);
1943	if (!tb) {
1944		err = -ENOBUFS;
1945		goto errout_mask_alloc;
1946	}
1947
1948	err = nla_parse_nested_deprecated(tb, TCA_FLOWER_MAX,
1949					  tca[TCA_OPTIONS], fl_policy, NULL);
1950	if (err < 0)
1951		goto errout_tb;
1952
1953	if (fold && handle && fold->handle != handle) {
1954		err = -EINVAL;
1955		goto errout_tb;
1956	}
1957
1958	fnew = kzalloc(sizeof(*fnew), GFP_KERNEL);
1959	if (!fnew) {
1960		err = -ENOBUFS;
1961		goto errout_tb;
1962	}
1963	INIT_LIST_HEAD(&fnew->hw_list);
1964	refcount_set(&fnew->refcnt, 1);
1965
1966	err = tcf_exts_init(&fnew->exts, net, TCA_FLOWER_ACT, 0);
1967	if (err < 0)
1968		goto errout;
1969
1970	if (tb[TCA_FLOWER_FLAGS]) {
1971		fnew->flags = nla_get_u32(tb[TCA_FLOWER_FLAGS]);
1972
1973		if (!tc_flags_valid(fnew->flags)) {
1974			err = -EINVAL;
1975			goto errout;
1976		}
1977	}
1978
1979	err = fl_set_parms(net, tp, fnew, mask, base, tb, tca[TCA_RATE], ovr,
1980			   tp->chain->tmplt_priv, rtnl_held, extack);
1981	if (err)
1982		goto errout;
1983
1984	err = fl_check_assign_mask(head, fnew, fold, mask);
1985	if (err)
1986		goto errout;
1987
1988	err = fl_ht_insert_unique(fnew, fold, &in_ht);
1989	if (err)
1990		goto errout_mask;
1991
1992	if (!tc_skip_hw(fnew->flags)) {
1993		err = fl_hw_replace_filter(tp, fnew, rtnl_held, extack);
1994		if (err)
1995			goto errout_ht;
1996	}
1997
1998	if (!tc_in_hw(fnew->flags))
1999		fnew->flags |= TCA_CLS_FLAGS_NOT_IN_HW;
2000
2001	spin_lock(&tp->lock);
2002
2003	/* tp was deleted concurrently. -EAGAIN will cause the caller to look
2004	 * up the proto again or create a new one, if necessary.
2005	 */
2006	if (tp->deleting) {
2007		err = -EAGAIN;
2008		goto errout_hw;
2009	}
2010
2011	if (fold) {
2012		/* Fold filter was deleted concurrently. Retry lookup. */
2013		if (fold->deleted) {
2014			err = -EAGAIN;
2015			goto errout_hw;
2016		}
2017
2018		fnew->handle = handle;
2019
2020		if (!in_ht) {
2021			struct rhashtable_params params =
2022				fnew->mask->filter_ht_params;
2023
2024			err = rhashtable_insert_fast(&fnew->mask->ht,
2025						     &fnew->ht_node,
2026						     params);
2027			if (err)
2028				goto errout_hw;
2029			in_ht = true;
2030		}
2031
2032		refcount_inc(&fnew->refcnt);
2033		rhashtable_remove_fast(&fold->mask->ht,
2034				       &fold->ht_node,
2035				       fold->mask->filter_ht_params);
2036		idr_replace(&head->handle_idr, fnew, fnew->handle);
2037		list_replace_rcu(&fold->list, &fnew->list);
2038		fold->deleted = true;
2039
2040		spin_unlock(&tp->lock);
2041
2042		fl_mask_put(head, fold->mask);
2043		if (!tc_skip_hw(fold->flags))
2044			fl_hw_destroy_filter(tp, fold, rtnl_held, NULL);
2045		tcf_unbind_filter(tp, &fold->res);
2046		/* Caller holds a reference to fold, so its refcnt is always
2047		 * > 0 after this.
2048		 */
2049		refcount_dec(&fold->refcnt);
2050		__fl_put(fold);
2051	} else {
2052		if (handle) {
2053			/* user specified a handle and it doesn't exist yet */
2054			err = idr_alloc_u32(&head->handle_idr, fnew, &handle,
2055					    handle, GFP_ATOMIC);
2056
2057			/* A filter with the specified handle was concurrently
2058			 * inserted after the initial check in cls_api. This is
2059			 * not necessarily an error if NLM_F_EXCL is not set in
2060			 * the message flags. Returning EAGAIN will cause
2061			 * cls_api to try to update the concurrently inserted rule.
2062			 */
2063			if (err == -ENOSPC)
2064				err = -EAGAIN;
2065		} else {
2066			handle = 1;
2067			err = idr_alloc_u32(&head->handle_idr, fnew, &handle,
2068					    INT_MAX, GFP_ATOMIC);
2069		}
2070		if (err)
2071			goto errout_hw;
2072
2073		refcount_inc(&fnew->refcnt);
2074		fnew->handle = handle;
2075		list_add_tail_rcu(&fnew->list, &fnew->mask->filters);
2076		spin_unlock(&tp->lock);
2077	}
2078
2079	*arg = fnew;
2080
2081	kfree(tb);
2082	tcf_queue_work(&mask->rwork, fl_uninit_mask_free_work);
2083	return 0;
2084
2085errout_ht:
2086	spin_lock(&tp->lock);
2087errout_hw:
2088	fnew->deleted = true;
2089	spin_unlock(&tp->lock);
2090	if (!tc_skip_hw(fnew->flags))
2091		fl_hw_destroy_filter(tp, fnew, rtnl_held, NULL);
2092	if (in_ht)
2093		rhashtable_remove_fast(&fnew->mask->ht, &fnew->ht_node,
2094				       fnew->mask->filter_ht_params);
2095errout_mask:
2096	fl_mask_put(head, fnew->mask);
2097errout:
2098	__fl_put(fnew);
2099errout_tb:
2100	kfree(tb);
2101errout_mask_alloc:
2102	tcf_queue_work(&mask->rwork, fl_uninit_mask_free_work);
2103errout_fold:
2104	if (fold)
2105		__fl_put(fold);
2106	return err;
2107}
2108
2109static int fl_delete(struct tcf_proto *tp, void *arg, bool *last,
2110		     bool rtnl_held, struct netlink_ext_ack *extack)
2111{
2112	struct cls_fl_head *head = fl_head_dereference(tp);
2113	struct cls_fl_filter *f = arg;
2114	bool last_on_mask;
2115	int err = 0;
2116
2117	err = __fl_delete(tp, f, &last_on_mask, rtnl_held, extack);
2118	*last = list_empty(&head->masks);
2119	__fl_put(f);
2120
2121	return err;
2122}
2123
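/* Iterate over all filters via the handle IDR, resuming from arg->cookie
 * so that a dump interrupted by a full skb can continue where it left
 * off.
 */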
2124static void fl_walk(struct tcf_proto *tp, struct tcf_walker *arg,
2125		    bool rtnl_held)
2126{
2127	struct cls_fl_head *head = fl_head_dereference(tp);
2128	unsigned long id = arg->cookie, tmp;
2129	struct cls_fl_filter *f;
2130
2131	arg->count = arg->skip;
2132
2133	idr_for_each_entry_continue_ul(&head->handle_idr, f, tmp, id) {
2134		/* don't return filters that are being deleted */
2135		if (!refcount_inc_not_zero(&f->refcnt))
2136			continue;
2137		if (arg->fn(tp, f, arg) < 0) {
2138			__fl_put(f);
2139			arg->stop = 1;
2140			break;
2141		}
2142		__fl_put(f);
2143		arg->count++;
2144	}
2145	arg->cookie = id;
2146}
2147
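/* Return the next filter on the hw_filters list with a reference taken,
 * starting after @f (or from the list head when @f is NULL). When adding
 * rules to a new callback, filters already marked deleted are skipped.
 */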
2148static struct cls_fl_filter *
2149fl_get_next_hw_filter(struct tcf_proto *tp, struct cls_fl_filter *f, bool add)
2150{
2151	struct cls_fl_head *head = fl_head_dereference(tp);
2152
2153	spin_lock(&tp->lock);
2154	if (list_empty(&head->hw_filters)) {
2155		spin_unlock(&tp->lock);
2156		return NULL;
2157	}
2158
2159	if (!f)
2160		f = list_entry(&head->hw_filters, struct cls_fl_filter,
2161			       hw_list);
2162	list_for_each_entry_continue(f, &head->hw_filters, hw_list) {
2163		if (!(add && f->deleted) && refcount_inc_not_zero(&f->refcnt)) {
2164			spin_unlock(&tp->lock);
2165			return f;
2166		}
2167	}
2168
2169	spin_unlock(&tp->lock);
2170	return NULL;
2171}
2172
2173static int fl_reoffload(struct tcf_proto *tp, bool add, flow_setup_cb_t *cb,
2174			void *cb_priv, struct netlink_ext_ack *extack)
2175{
2176	struct tcf_block *block = tp->chain->block;
2177	struct flow_cls_offload cls_flower = {};
2178	struct cls_fl_filter *f = NULL;
2179	int err;
2180
2181	/* The hw_filters list can only be changed by hw offload functions
2182	 * after obtaining the rtnl lock. Make sure it is not changed while
2183	 * reoffload iterates over it.
2184	 */
2185	ASSERT_RTNL();
2186
2187	while ((f = fl_get_next_hw_filter(tp, f, add))) {
2188		cls_flower.rule =
2189			flow_rule_alloc(tcf_exts_num_actions(&f->exts));
2190		if (!cls_flower.rule) {
2191			__fl_put(f);
2192			return -ENOMEM;
2193		}
2194
2195		tc_cls_common_offload_init(&cls_flower.common, tp, f->flags,
2196					   extack);
2197		cls_flower.command = add ?
2198			FLOW_CLS_REPLACE : FLOW_CLS_DESTROY;
2199		cls_flower.cookie = (unsigned long)f;
2200		cls_flower.rule->match.dissector = &f->mask->dissector;
2201		cls_flower.rule->match.mask = &f->mask->key;
2202		cls_flower.rule->match.key = &f->mkey;
2203
2204		err = tc_setup_flow_action(&cls_flower.rule->action, &f->exts);
2205		if (err) {
2206			kfree(cls_flower.rule);
2207			if (tc_skip_sw(f->flags)) {
2208				NL_SET_ERR_MSG_MOD(extack, "Failed to setup flow action");
2209				__fl_put(f);
2210				return err;
2211			}
2212			goto next_flow;
2213		}
2214
2215		cls_flower.classid = f->res.classid;
2216
2217		err = tc_setup_cb_reoffload(block, tp, add, cb,
2218					    TC_SETUP_CLSFLOWER, &cls_flower,
2219					    cb_priv, &f->flags,
2220					    &f->in_hw_count);
2221		tc_cleanup_flow_action(&cls_flower.rule->action);
2222		kfree(cls_flower.rule);
2223
2224		if (err) {
2225			__fl_put(f);
2226			return err;
2227		}
2228next_flow:
2229		__fl_put(f);
2230	}
2231
2232	return 0;
2233}
2234
2235static void fl_hw_add(struct tcf_proto *tp, void *type_data)
2236{
2237	struct flow_cls_offload *cls_flower = type_data;
2238	struct cls_fl_filter *f =
2239		(struct cls_fl_filter *) cls_flower->cookie;
2240	struct cls_fl_head *head = fl_head_dereference(tp);
2241
2242	spin_lock(&tp->lock);
2243	list_add(&f->hw_list, &head->hw_filters);
2244	spin_unlock(&tp->lock);
2245}
2246
2247static void fl_hw_del(struct tcf_proto *tp, void *type_data)
2248{
2249	struct flow_cls_offload *cls_flower = type_data;
2250	struct cls_fl_filter *f =
2251		(struct cls_fl_filter *) cls_flower->cookie;
2252
2253	spin_lock(&tp->lock);
2254	if (!list_empty(&f->hw_list))
2255		list_del_init(&f->hw_list);
2256	spin_unlock(&tp->lock);
2257}
2258
2259static int fl_hw_create_tmplt(struct tcf_chain *chain,
2260			      struct fl_flow_tmplt *tmplt)
2261{
2262	struct flow_cls_offload cls_flower = {};
2263	struct tcf_block *block = chain->block;
2264
2265	cls_flower.rule = flow_rule_alloc(0);
2266	if (!cls_flower.rule)
2267		return -ENOMEM;
2268
2269	cls_flower.common.chain_index = chain->index;
2270	cls_flower.command = FLOW_CLS_TMPLT_CREATE;
2271	cls_flower.cookie = (unsigned long) tmplt;
2272	cls_flower.rule->match.dissector = &tmplt->dissector;
2273	cls_flower.rule->match.mask = &tmplt->mask;
2274	cls_flower.rule->match.key = &tmplt->dummy_key;
2275
2276	/* We don't care if any of the drivers fails to handle this call.
2277	 * It serves just as a hint for them.
2278	 */
2279	tc_setup_cb_call(block, TC_SETUP_CLSFLOWER, &cls_flower, false, true);
2280	kfree(cls_flower.rule);
2281
2282	return 0;
2283}
2284
2285static void fl_hw_destroy_tmplt(struct tcf_chain *chain,
2286				struct fl_flow_tmplt *tmplt)
2287{
2288	struct flow_cls_offload cls_flower = {};
2289	struct tcf_block *block = chain->block;
2290
2291	cls_flower.common.chain_index = chain->index;
2292	cls_flower.command = FLOW_CLS_TMPLT_DESTROY;
2293	cls_flower.cookie = (unsigned long) tmplt;
2294
2295	tc_setup_cb_call(block, TC_SETUP_CLSFLOWER, &cls_flower, false, true);
2296}
2297
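/* Chain templates declare up front which fields filters on a chain may
 * match on. The template is passed to drivers as a hint (see
 * fl_hw_create_tmplt() above) and is enforced in software by
 * fl_mask_fits_tmplt() when filters are added.
 */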
2298static void *fl_tmplt_create(struct net *net, struct tcf_chain *chain,
2299			     struct nlattr **tca,
2300			     struct netlink_ext_ack *extack)
2301{
2302	struct fl_flow_tmplt *tmplt;
2303	struct nlattr **tb;
2304	int err;
2305
2306	if (!tca[TCA_OPTIONS])
2307		return ERR_PTR(-EINVAL);
2308
2309	tb = kcalloc(TCA_FLOWER_MAX + 1, sizeof(struct nlattr *), GFP_KERNEL);
2310	if (!tb)
2311		return ERR_PTR(-ENOBUFS);
2312	err = nla_parse_nested_deprecated(tb, TCA_FLOWER_MAX,
2313					  tca[TCA_OPTIONS], fl_policy, NULL);
2314	if (err)
2315		goto errout_tb;
2316
2317	tmplt = kzalloc(sizeof(*tmplt), GFP_KERNEL);
2318	if (!tmplt) {
2319		err = -ENOMEM;
2320		goto errout_tb;
2321	}
2322	tmplt->chain = chain;
2323	err = fl_set_key(net, tb, &tmplt->dummy_key, &tmplt->mask, extack);
2324	if (err)
2325		goto errout_tmplt;
2326
2327	fl_init_dissector(&tmplt->dissector, &tmplt->mask);
2328
2329	err = fl_hw_create_tmplt(chain, tmplt);
2330	if (err)
2331		goto errout_tmplt;
2332
2333	kfree(tb);
2334	return tmplt;
2335
2336errout_tmplt:
2337	kfree(tmplt);
2338errout_tb:
2339	kfree(tb);
2340	return ERR_PTR(err);
2341}
2342
2343static void fl_tmplt_destroy(void *tmplt_priv)
2344{
2345	struct fl_flow_tmplt *tmplt = tmplt_priv;
2346
2347	fl_hw_destroy_tmplt(tmplt->chain, tmplt);
2348	kfree(tmplt);
2349}
2350
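/* Dump one key/mask pair. Nothing is emitted when the mask is all zeroes,
 * and the mask attribute itself is skipped for TCA_FLOWER_UNSPEC, which
 * callers use for attributes that have no mask counterpart in the UAPI.
 */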
2351static int fl_dump_key_val(struct sk_buff *skb,
2352			   void *val, int val_type,
2353			   void *mask, int mask_type, int len)
2354{
2355	int err;
2356
2357	if (!memchr_inv(mask, 0, len))
2358		return 0;
2359	err = nla_put(skb, val_type, len, val);
2360	if (err)
2361		return err;
2362	if (mask_type != TCA_FLOWER_UNSPEC) {
2363		err = nla_put(skb, mask_type, len, mask);
2364		if (err)
2365			return err;
2366	}
2367	return 0;
2368}
2369
2370static int fl_dump_key_port_range(struct sk_buff *skb, struct fl_flow_key *key,
2371				  struct fl_flow_key *mask)
2372{
2373	if (fl_dump_key_val(skb, &key->tp_range.tp_min.dst,
2374			    TCA_FLOWER_KEY_PORT_DST_MIN,
2375			    &mask->tp_range.tp_min.dst, TCA_FLOWER_UNSPEC,
2376			    sizeof(key->tp_range.tp_min.dst)) ||
2377	    fl_dump_key_val(skb, &key->tp_range.tp_max.dst,
2378			    TCA_FLOWER_KEY_PORT_DST_MAX,
2379			    &mask->tp_range.tp_max.dst, TCA_FLOWER_UNSPEC,
2380			    sizeof(key->tp_range.tp_max.dst)) ||
2381	    fl_dump_key_val(skb, &key->tp_range.tp_min.src,
2382			    TCA_FLOWER_KEY_PORT_SRC_MIN,
2383			    &mask->tp_range.tp_min.src, TCA_FLOWER_UNSPEC,
2384			    sizeof(key->tp_range.tp_min.src)) ||
2385	    fl_dump_key_val(skb, &key->tp_range.tp_max.src,
2386			    TCA_FLOWER_KEY_PORT_SRC_MAX,
2387			    &mask->tp_range.tp_max.src, TCA_FLOWER_UNSPEC,
2388			    sizeof(key->tp_range.tp_max.src)))
2389		return -1;
2390
2391	return 0;
2392}
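/* A port range key such as the one dumped above is typically installed
 * from userspace with something like (illustrative iproute2 invocation;
 * exact syntax depends on the iproute2 version):
 *
 *   tc filter add dev eth0 ingress protocol ip flower \
 *           ip_proto tcp dst_port 8000-8080 action drop
 */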
2393
2394static int fl_dump_key_mpls_opt_lse(struct sk_buff *skb,
2395				    struct flow_dissector_key_mpls *mpls_key,
2396				    struct flow_dissector_key_mpls *mpls_mask,
2397				    u8 lse_index)
2398{
2399	struct flow_dissector_mpls_lse *lse_mask = &mpls_mask->ls[lse_index];
2400	struct flow_dissector_mpls_lse *lse_key = &mpls_key->ls[lse_index];
2401	int err;
2402
2403	err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_OPT_LSE_DEPTH,
2404			 lse_index + 1);
2405	if (err)
2406		return err;
2407
2408	if (lse_mask->mpls_ttl) {
2409		err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_OPT_LSE_TTL,
2410				 lse_key->mpls_ttl);
2411		if (err)
2412			return err;
2413	}
2414	if (lse_mask->mpls_bos) {
2415		err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_OPT_LSE_BOS,
2416				 lse_key->mpls_bos);
2417		if (err)
2418			return err;
2419	}
2420	if (lse_mask->mpls_tc) {
2421		err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_OPT_LSE_TC,
2422				 lse_key->mpls_tc);
2423		if (err)
2424			return err;
2425	}
2426	if (lse_mask->mpls_label) {
2427		err = nla_put_u32(skb, TCA_FLOWER_KEY_MPLS_OPT_LSE_LABEL,
2428				  lse_key->mpls_label);
2429		if (err)
2430			return err;
2431	}
2432
2433	return 0;
2434}
2435
2436static int fl_dump_key_mpls_opts(struct sk_buff *skb,
2437				 struct flow_dissector_key_mpls *mpls_key,
2438				 struct flow_dissector_key_mpls *mpls_mask)
2439{
2440	struct nlattr *opts;
2441	struct nlattr *lse;
2442	u8 lse_index;
2443	int err;
2444
2445	opts = nla_nest_start(skb, TCA_FLOWER_KEY_MPLS_OPTS);
2446	if (!opts)
2447		return -EMSGSIZE;
2448
2449	for (lse_index = 0; lse_index < FLOW_DIS_MPLS_MAX; lse_index++) {
2450		if (!(mpls_mask->used_lses & 1 << lse_index))
2451			continue;
2452
2453		lse = nla_nest_start(skb, TCA_FLOWER_KEY_MPLS_OPTS_LSE);
2454		if (!lse) {
2455			err = -EMSGSIZE;
2456			goto err_opts;
2457		}
2458
2459		err = fl_dump_key_mpls_opt_lse(skb, mpls_key, mpls_mask,
2460					       lse_index);
2461		if (err)
2462			goto err_opts_lse;
2463		nla_nest_end(skb, lse);
2464	}
2465	nla_nest_end(skb, opts);
2466
2467	return 0;
2468
2469err_opts_lse:
2470	nla_nest_cancel(skb, lse);
2471err_opts:
2472	nla_nest_cancel(skb, opts);
2473
2474	return err;
2475}
2476
2477static int fl_dump_key_mpls(struct sk_buff *skb,
2478			    struct flow_dissector_key_mpls *mpls_key,
2479			    struct flow_dissector_key_mpls *mpls_mask)
2480{
2481	struct flow_dissector_mpls_lse *lse_mask;
2482	struct flow_dissector_mpls_lse *lse_key;
2483	int err;
2484
2485	if (!mpls_mask->used_lses)
2486		return 0;
2487
2488	lse_mask = &mpls_mask->ls[0];
2489	lse_key = &mpls_key->ls[0];
2490
2491	/* For backward compatibility, don't use the MPLS nested attributes if
2492	 * the rule can be expressed using the old attributes.
2493	 */
2494	if (mpls_mask->used_lses & ~1 ||
2495	    (!lse_mask->mpls_ttl && !lse_mask->mpls_bos &&
2496	     !lse_mask->mpls_tc && !lse_mask->mpls_label))
2497		return fl_dump_key_mpls_opts(skb, mpls_key, mpls_mask);
2498
2499	if (lse_mask->mpls_ttl) {
2500		err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_TTL,
2501				 lse_key->mpls_ttl);
2502		if (err)
2503			return err;
2504	}
2505	if (lse_mask->mpls_tc) {
2506		err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_TC,
2507				 lse_key->mpls_tc);
2508		if (err)
2509			return err;
2510	}
2511	if (lse_mask->mpls_label) {
2512		err = nla_put_u32(skb, TCA_FLOWER_KEY_MPLS_LABEL,
2513				  lse_key->mpls_label);
2514		if (err)
2515			return err;
2516	}
2517	if (lse_mask->mpls_bos) {
2518		err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_BOS,
2519				 lse_key->mpls_bos);
2520		if (err)
2521			return err;
2522	}
2523	return 0;
2524}
2525
2526static int fl_dump_key_ip(struct sk_buff *skb, bool encap,
2527			  struct flow_dissector_key_ip *key,
2528			  struct flow_dissector_key_ip *mask)
2529{
2530	int tos_key = encap ? TCA_FLOWER_KEY_ENC_IP_TOS : TCA_FLOWER_KEY_IP_TOS;
2531	int ttl_key = encap ? TCA_FLOWER_KEY_ENC_IP_TTL : TCA_FLOWER_KEY_IP_TTL;
2532	int tos_mask = encap ? TCA_FLOWER_KEY_ENC_IP_TOS_MASK : TCA_FLOWER_KEY_IP_TOS_MASK;
2533	int ttl_mask = encap ? TCA_FLOWER_KEY_ENC_IP_TTL_MASK : TCA_FLOWER_KEY_IP_TTL_MASK;
2534
2535	if (fl_dump_key_val(skb, &key->tos, tos_key, &mask->tos, tos_mask, sizeof(key->tos)) ||
2536	    fl_dump_key_val(skb, &key->ttl, ttl_key, &mask->ttl, ttl_mask, sizeof(key->ttl)))
2537		return -1;
2538
2539	return 0;
2540}
2541
2542static int fl_dump_key_vlan(struct sk_buff *skb,
2543			    int vlan_id_key, int vlan_prio_key,
2544			    struct flow_dissector_key_vlan *vlan_key,
2545			    struct flow_dissector_key_vlan *vlan_mask)
2546{
2547	int err;
2548
2549	if (!memchr_inv(vlan_mask, 0, sizeof(*vlan_mask)))
2550		return 0;
2551	if (vlan_mask->vlan_id) {
2552		err = nla_put_u16(skb, vlan_id_key,
2553				  vlan_key->vlan_id);
2554		if (err)
2555			return err;
2556	}
2557	if (vlan_mask->vlan_priority) {
2558		err = nla_put_u8(skb, vlan_prio_key,
2559				 vlan_key->vlan_priority);
2560		if (err)
2561			return err;
2562	}
2563	return 0;
2564}
2565
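/* Translate one flag bit from the dissector key/mask pair into the
 * corresponding TCA_FLOWER_KEY_FLAGS_* bit. A flag is reported only when
 * it is covered by the dissector mask.
 */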
2566static void fl_get_key_flag(u32 dissector_key, u32 dissector_mask,
2567			    u32 *flower_key, u32 *flower_mask,
2568			    u32 flower_flag_bit, u32 dissector_flag_bit)
2569{
2570	if (dissector_mask & dissector_flag_bit) {
2571		*flower_mask |= flower_flag_bit;
2572		if (dissector_key & dissector_flag_bit)
2573			*flower_key |= flower_flag_bit;
2574	}
2575}
2576
2577static int fl_dump_key_flags(struct sk_buff *skb, u32 flags_key, u32 flags_mask)
2578{
2579	u32 key, mask;
2580	__be32 _key, _mask;
2581	int err;
2582
2583	if (!memchr_inv(&flags_mask, 0, sizeof(flags_mask)))
2584		return 0;
2585
2586	key = 0;
2587	mask = 0;
2588
2589	fl_get_key_flag(flags_key, flags_mask, &key, &mask,
2590			TCA_FLOWER_KEY_FLAGS_IS_FRAGMENT, FLOW_DIS_IS_FRAGMENT);
2591	fl_get_key_flag(flags_key, flags_mask, &key, &mask,
2592			TCA_FLOWER_KEY_FLAGS_FRAG_IS_FIRST,
2593			FLOW_DIS_FIRST_FRAG);
2594
2595	_key = cpu_to_be32(key);
2596	_mask = cpu_to_be32(mask);
2597
2598	err = nla_put(skb, TCA_FLOWER_KEY_FLAGS, 4, &_key);
2599	if (err)
2600		return err;
2601
2602	return nla_put(skb, TCA_FLOWER_KEY_FLAGS_MASK, 4, &_mask);
2603}
2604
2605static int fl_dump_key_geneve_opt(struct sk_buff *skb,
2606				  struct flow_dissector_key_enc_opts *enc_opts)
2607{
2608	struct geneve_opt *opt;
2609	struct nlattr *nest;
2610	int opt_off = 0;
2611
2612	nest = nla_nest_start_noflag(skb, TCA_FLOWER_KEY_ENC_OPTS_GENEVE);
2613	if (!nest)
2614		goto nla_put_failure;
2615
2616	while (enc_opts->len > opt_off) {
2617		opt = (struct geneve_opt *)&enc_opts->data[opt_off];
2618
2619		if (nla_put_be16(skb, TCA_FLOWER_KEY_ENC_OPT_GENEVE_CLASS,
2620				 opt->opt_class))
2621			goto nla_put_failure;
2622		if (nla_put_u8(skb, TCA_FLOWER_KEY_ENC_OPT_GENEVE_TYPE,
2623			       opt->type))
2624			goto nla_put_failure;
2625		if (nla_put(skb, TCA_FLOWER_KEY_ENC_OPT_GENEVE_DATA,
2626			    opt->length * 4, opt->opt_data))
2627			goto nla_put_failure;
2628
2629		opt_off += sizeof(struct geneve_opt) + opt->length * 4;
2630	}
2631	nla_nest_end(skb, nest);
2632	return 0;
2633
2634nla_put_failure:
2635	nla_nest_cancel(skb, nest);
2636	return -EMSGSIZE;
2637}
2638
2639static int fl_dump_key_vxlan_opt(struct sk_buff *skb,
2640				 struct flow_dissector_key_enc_opts *enc_opts)
2641{
2642	struct vxlan_metadata *md;
2643	struct nlattr *nest;
2644
2645	nest = nla_nest_start_noflag(skb, TCA_FLOWER_KEY_ENC_OPTS_VXLAN);
2646	if (!nest)
2647		goto nla_put_failure;
2648
2649	md = (struct vxlan_metadata *)&enc_opts->data[0];
2650	if (nla_put_u32(skb, TCA_FLOWER_KEY_ENC_OPT_VXLAN_GBP, md->gbp))
2651		goto nla_put_failure;
2652
2653	nla_nest_end(skb, nest);
2654	return 0;
2655
2656nla_put_failure:
2657	nla_nest_cancel(skb, nest);
2658	return -EMSGSIZE;
2659}
2660
2661static int fl_dump_key_erspan_opt(struct sk_buff *skb,
2662				  struct flow_dissector_key_enc_opts *enc_opts)
2663{
2664	struct erspan_metadata *md;
2665	struct nlattr *nest;
2666
2667	nest = nla_nest_start_noflag(skb, TCA_FLOWER_KEY_ENC_OPTS_ERSPAN);
2668	if (!nest)
2669		goto nla_put_failure;
2670
2671	md = (struct erspan_metadata *)&enc_opts->data[0];
2672	if (nla_put_u8(skb, TCA_FLOWER_KEY_ENC_OPT_ERSPAN_VER, md->version))
2673		goto nla_put_failure;
2674
2675	if (md->version == 1 &&
2676	    nla_put_be32(skb, TCA_FLOWER_KEY_ENC_OPT_ERSPAN_INDEX, md->u.index))
2677		goto nla_put_failure;
2678
2679	if (md->version == 2 &&
2680	    (nla_put_u8(skb, TCA_FLOWER_KEY_ENC_OPT_ERSPAN_DIR,
2681			md->u.md2.dir) ||
2682	     nla_put_u8(skb, TCA_FLOWER_KEY_ENC_OPT_ERSPAN_HWID,
2683			get_hwid(&md->u.md2))))
2684		goto nla_put_failure;
2685
2686	nla_nest_end(skb, nest);
2687	return 0;
2688
2689nla_put_failure:
2690	nla_nest_cancel(skb, nest);
2691	return -EMSGSIZE;
2692}
2693
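/* Conntrack state, zone, mark and labels are dumped only when the kernel
 * is built with the matching CONFIG_NF_CONNTRACK* options; otherwise the
 * IS_ENABLED() checks let the compiler drop the puts entirely.
 */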
2694static int fl_dump_key_ct(struct sk_buff *skb,
2695			  struct flow_dissector_key_ct *key,
2696			  struct flow_dissector_key_ct *mask)
2697{
2698	if (IS_ENABLED(CONFIG_NF_CONNTRACK) &&
2699	    fl_dump_key_val(skb, &key->ct_state, TCA_FLOWER_KEY_CT_STATE,
2700			    &mask->ct_state, TCA_FLOWER_KEY_CT_STATE_MASK,
2701			    sizeof(key->ct_state)))
2702		goto nla_put_failure;
2703
2704	if (IS_ENABLED(CONFIG_NF_CONNTRACK_ZONES) &&
2705	    fl_dump_key_val(skb, &key->ct_zone, TCA_FLOWER_KEY_CT_ZONE,
2706			    &mask->ct_zone, TCA_FLOWER_KEY_CT_ZONE_MASK,
2707			    sizeof(key->ct_zone)))
2708		goto nla_put_failure;
2709
2710	if (IS_ENABLED(CONFIG_NF_CONNTRACK_MARK) &&
2711	    fl_dump_key_val(skb, &key->ct_mark, TCA_FLOWER_KEY_CT_MARK,
2712			    &mask->ct_mark, TCA_FLOWER_KEY_CT_MARK_MASK,
2713			    sizeof(key->ct_mark)))
2714		goto nla_put_failure;
2715
2716	if (IS_ENABLED(CONFIG_NF_CONNTRACK_LABELS) &&
2717	    fl_dump_key_val(skb, &key->ct_labels, TCA_FLOWER_KEY_CT_LABELS,
2718			    &mask->ct_labels, TCA_FLOWER_KEY_CT_LABELS_MASK,
2719			    sizeof(key->ct_labels)))
2720		goto nla_put_failure;
2721
2722	return 0;
2723
2724nla_put_failure:
2725	return -EMSGSIZE;
2726}
2727
2728static int fl_dump_key_options(struct sk_buff *skb, int enc_opt_type,
2729			       struct flow_dissector_key_enc_opts *enc_opts)
2730{
2731	struct nlattr *nest;
2732	int err;
2733
2734	if (!enc_opts->len)
2735		return 0;
2736
2737	nest = nla_nest_start_noflag(skb, enc_opt_type);
2738	if (!nest)
2739		goto nla_put_failure;
2740
2741	switch (enc_opts->dst_opt_type) {
2742	case TUNNEL_GENEVE_OPT:
2743		err = fl_dump_key_geneve_opt(skb, enc_opts);
2744		if (err)
2745			goto nla_put_failure;
2746		break;
2747	case TUNNEL_VXLAN_OPT:
2748		err = fl_dump_key_vxlan_opt(skb, enc_opts);
2749		if (err)
2750			goto nla_put_failure;
2751		break;
2752	case TUNNEL_ERSPAN_OPT:
2753		err = fl_dump_key_erspan_opt(skb, enc_opts);
2754		if (err)
2755			goto nla_put_failure;
2756		break;
2757	default:
2758		goto nla_put_failure;
2759	}
2760	nla_nest_end(skb, nest);
2761	return 0;
2762
2763nla_put_failure:
2764	nla_nest_cancel(skb, nest);
2765	return -EMSGSIZE;
2766}
2767
2768static int fl_dump_key_enc_opt(struct sk_buff *skb,
2769			       struct flow_dissector_key_enc_opts *key_opts,
2770			       struct flow_dissector_key_enc_opts *msk_opts)
2771{
2772	int err;
2773
2774	err = fl_dump_key_options(skb, TCA_FLOWER_KEY_ENC_OPTS, key_opts);
2775	if (err)
2776		return err;
2777
2778	return fl_dump_key_options(skb, TCA_FLOWER_KEY_ENC_OPTS_MASK, msk_opts);
2779}
2780
2781static int fl_dump_key(struct sk_buff *skb, struct net *net,
2782		       struct fl_flow_key *key, struct fl_flow_key *mask)
2783{
2784	if (mask->meta.ingress_ifindex) {
2785		struct net_device *dev;
2786
2787		dev = __dev_get_by_index(net, key->meta.ingress_ifindex);
2788		if (dev && nla_put_string(skb, TCA_FLOWER_INDEV, dev->name))
2789			goto nla_put_failure;
2790	}
2791
2792	if (fl_dump_key_val(skb, key->eth.dst, TCA_FLOWER_KEY_ETH_DST,
2793			    mask->eth.dst, TCA_FLOWER_KEY_ETH_DST_MASK,
2794			    sizeof(key->eth.dst)) ||
2795	    fl_dump_key_val(skb, key->eth.src, TCA_FLOWER_KEY_ETH_SRC,
2796			    mask->eth.src, TCA_FLOWER_KEY_ETH_SRC_MASK,
2797			    sizeof(key->eth.src)) ||
2798	    fl_dump_key_val(skb, &key->basic.n_proto, TCA_FLOWER_KEY_ETH_TYPE,
2799			    &mask->basic.n_proto, TCA_FLOWER_UNSPEC,
2800			    sizeof(key->basic.n_proto)))
2801		goto nla_put_failure;
2802
2803	if (fl_dump_key_mpls(skb, &key->mpls, &mask->mpls))
2804		goto nla_put_failure;
2805
2806	if (fl_dump_key_vlan(skb, TCA_FLOWER_KEY_VLAN_ID,
2807			     TCA_FLOWER_KEY_VLAN_PRIO, &key->vlan, &mask->vlan))
2808		goto nla_put_failure;
2809
2810	if (fl_dump_key_vlan(skb, TCA_FLOWER_KEY_CVLAN_ID,
2811			     TCA_FLOWER_KEY_CVLAN_PRIO,
2812			     &key->cvlan, &mask->cvlan) ||
2813	    (mask->cvlan.vlan_tpid &&
2814	     nla_put_be16(skb, TCA_FLOWER_KEY_VLAN_ETH_TYPE,
2815			  key->cvlan.vlan_tpid)))
2816		goto nla_put_failure;
2817
2818	if (mask->basic.n_proto) {
2819		if (mask->cvlan.vlan_tpid) {
2820			if (nla_put_be16(skb, TCA_FLOWER_KEY_CVLAN_ETH_TYPE,
2821					 key->basic.n_proto))
2822				goto nla_put_failure;
2823		} else if (mask->vlan.vlan_tpid) {
2824			if (nla_put_be16(skb, TCA_FLOWER_KEY_VLAN_ETH_TYPE,
2825					 key->basic.n_proto))
2826				goto nla_put_failure;
2827		}
2828	}
2829
2830	if ((key->basic.n_proto == htons(ETH_P_IP) ||
2831	     key->basic.n_proto == htons(ETH_P_IPV6)) &&
2832	    (fl_dump_key_val(skb, &key->basic.ip_proto, TCA_FLOWER_KEY_IP_PROTO,
2833			    &mask->basic.ip_proto, TCA_FLOWER_UNSPEC,
2834			    sizeof(key->basic.ip_proto)) ||
2835	    fl_dump_key_ip(skb, false, &key->ip, &mask->ip)))
2836		goto nla_put_failure;
2837
2838	if (key->control.addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS &&
2839	    (fl_dump_key_val(skb, &key->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC,
2840			     &mask->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC_MASK,
2841			     sizeof(key->ipv4.src)) ||
2842	     fl_dump_key_val(skb, &key->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST,
2843			     &mask->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST_MASK,
2844			     sizeof(key->ipv4.dst))))
2845		goto nla_put_failure;
2846	else if (key->control.addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS &&
2847		 (fl_dump_key_val(skb, &key->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC,
2848				  &mask->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC_MASK,
2849				  sizeof(key->ipv6.src)) ||
2850		  fl_dump_key_val(skb, &key->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST,
2851				  &mask->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST_MASK,
2852				  sizeof(key->ipv6.dst))))
2853		goto nla_put_failure;
2854
2855	if (key->basic.ip_proto == IPPROTO_TCP &&
2856	    (fl_dump_key_val(skb, &key->tp.src, TCA_FLOWER_KEY_TCP_SRC,
2857			     &mask->tp.src, TCA_FLOWER_KEY_TCP_SRC_MASK,
2858			     sizeof(key->tp.src)) ||
2859	     fl_dump_key_val(skb, &key->tp.dst, TCA_FLOWER_KEY_TCP_DST,
2860			     &mask->tp.dst, TCA_FLOWER_KEY_TCP_DST_MASK,
2861			     sizeof(key->tp.dst)) ||
2862	     fl_dump_key_val(skb, &key->tcp.flags, TCA_FLOWER_KEY_TCP_FLAGS,
2863			     &mask->tcp.flags, TCA_FLOWER_KEY_TCP_FLAGS_MASK,
2864			     sizeof(key->tcp.flags))))
2865		goto nla_put_failure;
2866	else if (key->basic.ip_proto == IPPROTO_UDP &&
2867		 (fl_dump_key_val(skb, &key->tp.src, TCA_FLOWER_KEY_UDP_SRC,
2868				  &mask->tp.src, TCA_FLOWER_KEY_UDP_SRC_MASK,
2869				  sizeof(key->tp.src)) ||
2870		  fl_dump_key_val(skb, &key->tp.dst, TCA_FLOWER_KEY_UDP_DST,
2871				  &mask->tp.dst, TCA_FLOWER_KEY_UDP_DST_MASK,
2872				  sizeof(key->tp.dst))))
2873		goto nla_put_failure;
2874	else if (key->basic.ip_proto == IPPROTO_SCTP &&
2875		 (fl_dump_key_val(skb, &key->tp.src, TCA_FLOWER_KEY_SCTP_SRC,
2876				  &mask->tp.src, TCA_FLOWER_KEY_SCTP_SRC_MASK,
2877				  sizeof(key->tp.src)) ||
2878		  fl_dump_key_val(skb, &key->tp.dst, TCA_FLOWER_KEY_SCTP_DST,
2879				  &mask->tp.dst, TCA_FLOWER_KEY_SCTP_DST_MASK,
2880				  sizeof(key->tp.dst))))
2881		goto nla_put_failure;
2882	else if (key->basic.n_proto == htons(ETH_P_IP) &&
2883		 key->basic.ip_proto == IPPROTO_ICMP &&
2884		 (fl_dump_key_val(skb, &key->icmp.type,
2885				  TCA_FLOWER_KEY_ICMPV4_TYPE, &mask->icmp.type,
2886				  TCA_FLOWER_KEY_ICMPV4_TYPE_MASK,
2887				  sizeof(key->icmp.type)) ||
2888		  fl_dump_key_val(skb, &key->icmp.code,
2889				  TCA_FLOWER_KEY_ICMPV4_CODE, &mask->icmp.code,
2890				  TCA_FLOWER_KEY_ICMPV4_CODE_MASK,
2891				  sizeof(key->icmp.code))))
2892		goto nla_put_failure;
2893	else if (key->basic.n_proto == htons(ETH_P_IPV6) &&
2894		 key->basic.ip_proto == IPPROTO_ICMPV6 &&
2895		 (fl_dump_key_val(skb, &key->icmp.type,
2896				  TCA_FLOWER_KEY_ICMPV6_TYPE, &mask->icmp.type,
2897				  TCA_FLOWER_KEY_ICMPV6_TYPE_MASK,
2898				  sizeof(key->icmp.type)) ||
2899		  fl_dump_key_val(skb, &key->icmp.code,
2900				  TCA_FLOWER_KEY_ICMPV6_CODE, &mask->icmp.code,
2901				  TCA_FLOWER_KEY_ICMPV6_CODE_MASK,
2902				  sizeof(key->icmp.code))))
2903		goto nla_put_failure;
2904	else if ((key->basic.n_proto == htons(ETH_P_ARP) ||
2905		  key->basic.n_proto == htons(ETH_P_RARP)) &&
2906		 (fl_dump_key_val(skb, &key->arp.sip,
2907				  TCA_FLOWER_KEY_ARP_SIP, &mask->arp.sip,
2908				  TCA_FLOWER_KEY_ARP_SIP_MASK,
2909				  sizeof(key->arp.sip)) ||
2910		  fl_dump_key_val(skb, &key->arp.tip,
2911				  TCA_FLOWER_KEY_ARP_TIP, &mask->arp.tip,
2912				  TCA_FLOWER_KEY_ARP_TIP_MASK,
2913				  sizeof(key->arp.tip)) ||
2914		  fl_dump_key_val(skb, &key->arp.op,
2915				  TCA_FLOWER_KEY_ARP_OP, &mask->arp.op,
2916				  TCA_FLOWER_KEY_ARP_OP_MASK,
2917				  sizeof(key->arp.op)) ||
2918		  fl_dump_key_val(skb, key->arp.sha, TCA_FLOWER_KEY_ARP_SHA,
2919				  mask->arp.sha, TCA_FLOWER_KEY_ARP_SHA_MASK,
2920				  sizeof(key->arp.sha)) ||
2921		  fl_dump_key_val(skb, key->arp.tha, TCA_FLOWER_KEY_ARP_THA,
2922				  mask->arp.tha, TCA_FLOWER_KEY_ARP_THA_MASK,
2923				  sizeof(key->arp.tha))))
2924		goto nla_put_failure;
2925
2926	if ((key->basic.ip_proto == IPPROTO_TCP ||
2927	     key->basic.ip_proto == IPPROTO_UDP ||
2928	     key->basic.ip_proto == IPPROTO_SCTP) &&
2929	     fl_dump_key_port_range(skb, key, mask))
2930		goto nla_put_failure;
2931
2932	if (key->enc_control.addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS &&
2933	    (fl_dump_key_val(skb, &key->enc_ipv4.src,
2934			    TCA_FLOWER_KEY_ENC_IPV4_SRC, &mask->enc_ipv4.src,
2935			    TCA_FLOWER_KEY_ENC_IPV4_SRC_MASK,
2936			    sizeof(key->enc_ipv4.src)) ||
2937	     fl_dump_key_val(skb, &key->enc_ipv4.dst,
2938			     TCA_FLOWER_KEY_ENC_IPV4_DST, &mask->enc_ipv4.dst,
2939			     TCA_FLOWER_KEY_ENC_IPV4_DST_MASK,
2940			     sizeof(key->enc_ipv4.dst))))
2941		goto nla_put_failure;
2942	else if (key->enc_control.addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS &&
2943		 (fl_dump_key_val(skb, &key->enc_ipv6.src,
2944			    TCA_FLOWER_KEY_ENC_IPV6_SRC, &mask->enc_ipv6.src,
2945			    TCA_FLOWER_KEY_ENC_IPV6_SRC_MASK,
2946			    sizeof(key->enc_ipv6.src)) ||
2947		 fl_dump_key_val(skb, &key->enc_ipv6.dst,
2948				 TCA_FLOWER_KEY_ENC_IPV6_DST,
2949				 &mask->enc_ipv6.dst,
2950				 TCA_FLOWER_KEY_ENC_IPV6_DST_MASK,
2951			    sizeof(key->enc_ipv6.dst))))
2952		goto nla_put_failure;
2953
2954	if (fl_dump_key_val(skb, &key->enc_key_id, TCA_FLOWER_KEY_ENC_KEY_ID,
2955			    &mask->enc_key_id, TCA_FLOWER_UNSPEC,
2956			    sizeof(key->enc_key_id)) ||
2957	    fl_dump_key_val(skb, &key->enc_tp.src,
2958			    TCA_FLOWER_KEY_ENC_UDP_SRC_PORT,
2959			    &mask->enc_tp.src,
2960			    TCA_FLOWER_KEY_ENC_UDP_SRC_PORT_MASK,
2961			    sizeof(key->enc_tp.src)) ||
2962	    fl_dump_key_val(skb, &key->enc_tp.dst,
2963			    TCA_FLOWER_KEY_ENC_UDP_DST_PORT,
2964			    &mask->enc_tp.dst,
2965			    TCA_FLOWER_KEY_ENC_UDP_DST_PORT_MASK,
2966			    sizeof(key->enc_tp.dst)) ||
2967	    fl_dump_key_ip(skb, true, &key->enc_ip, &mask->enc_ip) ||
2968	    fl_dump_key_enc_opt(skb, &key->enc_opts, &mask->enc_opts))
2969		goto nla_put_failure;
2970
2971	if (fl_dump_key_ct(skb, &key->ct, &mask->ct))
2972		goto nla_put_failure;
2973
2974	if (fl_dump_key_flags(skb, key->control.flags, mask->control.flags))
2975		goto nla_put_failure;
2976
2977	if (fl_dump_key_val(skb, &key->hash.hash, TCA_FLOWER_KEY_HASH,
2978			     &mask->hash.hash, TCA_FLOWER_KEY_HASH_MASK,
2979			     sizeof(key->hash.hash)))
2980		goto nla_put_failure;
2981
2982	return 0;
2983
2984nla_put_failure:
2985	return -EMSGSIZE;
2986}
2987
2988static int fl_dump(struct net *net, struct tcf_proto *tp, void *fh,
2989		   struct sk_buff *skb, struct tcmsg *t, bool rtnl_held)
2990{
2991	struct cls_fl_filter *f = fh;
2992	struct nlattr *nest;
2993	struct fl_flow_key *key, *mask;
2994	bool skip_hw;
2995
2996	if (!f)
2997		return skb->len;
2998
2999	t->tcm_handle = f->handle;
3000
3001	nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
3002	if (!nest)
3003		goto nla_put_failure;
3004
3005	spin_lock(&tp->lock);
3006
3007	if (f->res.classid &&
3008	    nla_put_u32(skb, TCA_FLOWER_CLASSID, f->res.classid))
3009		goto nla_put_failure_locked;
3010
3011	key = &f->key;
3012	mask = &f->mask->key;
3013	skip_hw = tc_skip_hw(f->flags);
3014
3015	if (fl_dump_key(skb, net, key, mask))
3016		goto nla_put_failure_locked;
3017
3018	if (f->flags && nla_put_u32(skb, TCA_FLOWER_FLAGS, f->flags))
3019		goto nla_put_failure_locked;
3020
3021	spin_unlock(&tp->lock);
3022
3023	if (!skip_hw)
3024		fl_hw_update_stats(tp, f, rtnl_held);
3025
3026	if (nla_put_u32(skb, TCA_FLOWER_IN_HW_COUNT, f->in_hw_count))
3027		goto nla_put_failure;
3028
3029	if (tcf_exts_dump(skb, &f->exts))
3030		goto nla_put_failure;
3031
3032	nla_nest_end(skb, nest);
3033
3034	if (tcf_exts_dump_stats(skb, &f->exts) < 0)
3035		goto nla_put_failure;
3036
3037	return skb->len;
3038
3039nla_put_failure_locked:
3040	spin_unlock(&tp->lock);
3041nla_put_failure:
3042	nla_nest_cancel(skb, nest);
3043	return -1;
3044}
3045
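/* Terse dump: emit only the handle, flags and a terse view of the
 * actions, skipping the flow key itself. This keeps dumps of very large
 * rule sets cheap when userspace asks for terse output.
 */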
3046static int fl_terse_dump(struct net *net, struct tcf_proto *tp, void *fh,
3047			 struct sk_buff *skb, struct tcmsg *t, bool rtnl_held)
3048{
3049	struct cls_fl_filter *f = fh;
3050	struct nlattr *nest;
3051	bool skip_hw;
3052
3053	if (!f)
3054		return skb->len;
3055
3056	t->tcm_handle = f->handle;
3057
3058	nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
3059	if (!nest)
3060		goto nla_put_failure;
3061
3062	spin_lock(&tp->lock);
3063
3064	skip_hw = tc_skip_hw(f->flags);
3065
3066	if (f->flags && nla_put_u32(skb, TCA_FLOWER_FLAGS, f->flags))
3067		goto nla_put_failure_locked;
3068
3069	spin_unlock(&tp->lock);
3070
3071	if (!skip_hw)
3072		fl_hw_update_stats(tp, f, rtnl_held);
3073
3074	if (tcf_exts_terse_dump(skb, &f->exts))
3075		goto nla_put_failure;
3076
3077	nla_nest_end(skb, nest);
3078
3079	return skb->len;
3080
3081nla_put_failure_locked:
3082	spin_unlock(&tp->lock);
3083nla_put_failure:
3084	nla_nest_cancel(skb, nest);
3085	return -1;
3086}
3087
3088static int fl_tmplt_dump(struct sk_buff *skb, struct net *net, void *tmplt_priv)
3089{
3090	struct fl_flow_tmplt *tmplt = tmplt_priv;
3091	struct fl_flow_key *key, *mask;
3092	struct nlattr *nest;
3093
3094	nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
3095	if (!nest)
3096		goto nla_put_failure;
3097
3098	key = &tmplt->dummy_key;
3099	mask = &tmplt->mask;
3100
3101	if (fl_dump_key(skb, net, key, mask))
3102		goto nla_put_failure;
3103
3104	nla_nest_end(skb, nest);
3105
3106	return skb->len;
3107
3108nla_put_failure:
3109	nla_nest_cancel(skb, nest);
3110	return -EMSGSIZE;
3111}
3112
3113static void fl_bind_class(void *fh, u32 classid, unsigned long cl, void *q,
3114			  unsigned long base)
3115{
3116	struct cls_fl_filter *f = fh;
3117
3118	if (f && f->res.classid == classid) {
3119		if (cl)
3120			__tcf_bind_filter(q, &f->res, base);
3121		else
3122			__tcf_unbind_filter(q, &f->res);
3123	}
3124}
3125
3126static bool fl_delete_empty(struct tcf_proto *tp)
3127{
3128	struct cls_fl_head *head = fl_head_dereference(tp);
3129
3130	spin_lock(&tp->lock);
3131	tp->deleting = idr_is_empty(&head->handle_idr);
3132	spin_unlock(&tp->lock);
3133
3134	return tp->deleting;
3135}
3136
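/* TCF_PROTO_OPS_DOIT_UNLOCKED advertises that these ops may be called
 * without rtnl; internal state is protected by tp->lock, head->masks_lock,
 * RCU and refcounts instead.
 */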
3137static struct tcf_proto_ops cls_fl_ops __read_mostly = {
3138	.kind		= "flower",
3139	.classify	= fl_classify,
3140	.init		= fl_init,
3141	.destroy	= fl_destroy,
3142	.get		= fl_get,
3143	.put		= fl_put,
3144	.change		= fl_change,
3145	.delete		= fl_delete,
3146	.delete_empty	= fl_delete_empty,
3147	.walk		= fl_walk,
3148	.reoffload	= fl_reoffload,
3149	.hw_add		= fl_hw_add,
3150	.hw_del		= fl_hw_del,
3151	.dump		= fl_dump,
3152	.terse_dump	= fl_terse_dump,
3153	.bind_class	= fl_bind_class,
3154	.tmplt_create	= fl_tmplt_create,
3155	.tmplt_destroy	= fl_tmplt_destroy,
3156	.tmplt_dump	= fl_tmplt_dump,
3157	.owner		= THIS_MODULE,
3158	.flags		= TCF_PROTO_OPS_DOIT_UNLOCKED,
3159};
3160
3161static int __init cls_fl_init(void)
3162{
3163	return register_tcf_proto_ops(&cls_fl_ops);
3164}
3165
3166static void __exit cls_fl_exit(void)
3167{
3168	unregister_tcf_proto_ops(&cls_fl_ops);
3169}
3170
3171module_init(cls_fl_init);
3172module_exit(cls_fl_exit);
3173
3174MODULE_AUTHOR("Jiri Pirko <jiri@resnulli.us>");
3175MODULE_DESCRIPTION("Flower classifier");
3176MODULE_LICENSE("GPL v2");
 118
 119static const struct rhashtable_params mask_ht_params = {
 120	.key_offset = offsetof(struct fl_flow_mask, key),
 121	.key_len = sizeof(struct fl_flow_key),
 122	.head_offset = offsetof(struct fl_flow_mask, ht_node),
 123	.automatic_shrinking = true,
 124};
 125
 126static unsigned short int fl_mask_range(const struct fl_flow_mask *mask)
 127{
 128	return mask->range.end - mask->range.start;
 129}
 130
 131static void fl_mask_update_range(struct fl_flow_mask *mask)
 132{
 133	const u8 *bytes = (const u8 *) &mask->key;
 134	size_t size = sizeof(mask->key);
 135	size_t i, first = 0, last;
 136
 137	for (i = 0; i < size; i++) {
 138		if (bytes[i]) {
 139			first = i;
 140			break;
 141		}
 142	}
 143	last = first;
 144	for (i = size - 1; i != first; i--) {
 145		if (bytes[i]) {
 146			last = i;
 147			break;
 148		}
 149	}
 150	mask->range.start = rounddown(first, sizeof(long));
 151	mask->range.end = roundup(last + 1, sizeof(long));
 152}
 153
 154static void *fl_key_get_start(struct fl_flow_key *key,
 155			      const struct fl_flow_mask *mask)
 156{
 157	return (u8 *) key + mask->range.start;
 158}
 159
 160static void fl_set_masked_key(struct fl_flow_key *mkey, struct fl_flow_key *key,
 161			      struct fl_flow_mask *mask)
 162{
 163	const long *lkey = fl_key_get_start(key, mask);
 164	const long *lmask = fl_key_get_start(&mask->key, mask);
 165	long *lmkey = fl_key_get_start(mkey, mask);
 166	int i;
 167
 168	for (i = 0; i < fl_mask_range(mask); i += sizeof(long))
 169		*lmkey++ = *lkey++ & *lmask++;
 170}
 171
 172static bool fl_mask_fits_tmplt(struct fl_flow_tmplt *tmplt,
 173			       struct fl_flow_mask *mask)
 174{
 175	const long *lmask = fl_key_get_start(&mask->key, mask);
 176	const long *ltmplt;
 177	int i;
 178
 179	if (!tmplt)
 180		return true;
 181	ltmplt = fl_key_get_start(&tmplt->mask, mask);
 182	for (i = 0; i < fl_mask_range(mask); i += sizeof(long)) {
 183		if (~*ltmplt++ & *lmask++)
 184			return false;
 185	}
 186	return true;
 187}
 188
 189static void fl_clear_masked_range(struct fl_flow_key *key,
 190				  struct fl_flow_mask *mask)
 191{
 192	memset(fl_key_get_start(key, mask), 0, fl_mask_range(mask));
 193}
 194
 195static bool fl_range_port_dst_cmp(struct cls_fl_filter *filter,
 196				  struct fl_flow_key *key,
 197				  struct fl_flow_key *mkey)
 198{
 199	__be16 min_mask, max_mask, min_val, max_val;
 200
 201	min_mask = htons(filter->mask->key.tp_min.dst);
 202	max_mask = htons(filter->mask->key.tp_max.dst);
 203	min_val = htons(filter->key.tp_min.dst);
 204	max_val = htons(filter->key.tp_max.dst);
 205
 206	if (min_mask && max_mask) {
 207		if (htons(key->tp.dst) < min_val ||
 208		    htons(key->tp.dst) > max_val)
 209			return false;
 210
 211		/* skb does not have min and max values */
 212		mkey->tp_min.dst = filter->mkey.tp_min.dst;
 213		mkey->tp_max.dst = filter->mkey.tp_max.dst;
 214	}
 215	return true;
 216}
 217
 218static bool fl_range_port_src_cmp(struct cls_fl_filter *filter,
 219				  struct fl_flow_key *key,
 220				  struct fl_flow_key *mkey)
 221{
 222	__be16 min_mask, max_mask, min_val, max_val;
 223
 224	min_mask = htons(filter->mask->key.tp_min.src);
 225	max_mask = htons(filter->mask->key.tp_max.src);
 226	min_val = htons(filter->key.tp_min.src);
 227	max_val = htons(filter->key.tp_max.src);
 228
 229	if (min_mask && max_mask) {
 230		if (htons(key->tp.src) < min_val ||
 231		    htons(key->tp.src) > max_val)
 232			return false;
 233
 234		/* skb does not have min and max values */
 235		mkey->tp_min.src = filter->mkey.tp_min.src;
 236		mkey->tp_max.src = filter->mkey.tp_max.src;
 237	}
 238	return true;
 239}
 240
 241static struct cls_fl_filter *__fl_lookup(struct fl_flow_mask *mask,
 242					 struct fl_flow_key *mkey)
 243{
 244	return rhashtable_lookup_fast(&mask->ht, fl_key_get_start(mkey, mask),
 245				      mask->filter_ht_params);
 246}
 247
 248static struct cls_fl_filter *fl_lookup_range(struct fl_flow_mask *mask,
 249					     struct fl_flow_key *mkey,
 250					     struct fl_flow_key *key)
 251{
 252	struct cls_fl_filter *filter, *f;
 253
 254	list_for_each_entry_rcu(filter, &mask->filters, list) {
 255		if (!fl_range_port_dst_cmp(filter, key, mkey))
 256			continue;
 257
 258		if (!fl_range_port_src_cmp(filter, key, mkey))
 259			continue;
 260
 261		f = __fl_lookup(mask, mkey);
 262		if (f)
 263			return f;
 264	}
 265	return NULL;
 266}
 267
 268static struct cls_fl_filter *fl_lookup(struct fl_flow_mask *mask,
 269				       struct fl_flow_key *mkey,
 270				       struct fl_flow_key *key)
 271{
 
 
 
 272	if ((mask->flags & TCA_FLOWER_MASK_FLAGS_RANGE))
 273		return fl_lookup_range(mask, mkey, key);
 274
 275	return __fl_lookup(mask, mkey);
 276}
 277
 278static u16 fl_ct_info_to_flower_map[] = {
 279	[IP_CT_ESTABLISHED] =		TCA_FLOWER_KEY_CT_FLAGS_TRACKED |
 280					TCA_FLOWER_KEY_CT_FLAGS_ESTABLISHED,
 281	[IP_CT_RELATED] =		TCA_FLOWER_KEY_CT_FLAGS_TRACKED |
 282					TCA_FLOWER_KEY_CT_FLAGS_RELATED,
 283	[IP_CT_ESTABLISHED_REPLY] =	TCA_FLOWER_KEY_CT_FLAGS_TRACKED |
 284					TCA_FLOWER_KEY_CT_FLAGS_ESTABLISHED,
 285	[IP_CT_RELATED_REPLY] =		TCA_FLOWER_KEY_CT_FLAGS_TRACKED |
 286					TCA_FLOWER_KEY_CT_FLAGS_RELATED,
 287	[IP_CT_NEW] =			TCA_FLOWER_KEY_CT_FLAGS_TRACKED |
 288					TCA_FLOWER_KEY_CT_FLAGS_NEW,
 289};
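/* The map above folds conntrack's directional states into flower's
 * direction-agnostic flags: both IP_CT_ESTABLISHED and
 * IP_CT_ESTABLISHED_REPLY become tracked+established, as matched by e.g.
 * "tc filter ... flower ct_state +trk+est ...".
 */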
 290
 291static int fl_classify(struct sk_buff *skb, const struct tcf_proto *tp,
 292		       struct tcf_result *res)
 293{
 294	struct cls_fl_head *head = rcu_dereference_bh(tp->root);
 295	struct fl_flow_key skb_mkey;
 296	struct fl_flow_key skb_key;
 297	struct fl_flow_mask *mask;
 298	struct cls_fl_filter *f;
 299
 300	list_for_each_entry_rcu(mask, &head->masks, list) {
 301		fl_clear_masked_range(&skb_key, mask);
 302
 303		skb_flow_dissect_meta(skb, &mask->dissector, &skb_key);
 304		/* skb_flow_dissect() does not set n_proto in case of an unknown
 305		 * protocol, so set it here instead.
 306		 */
 307		skb_key.basic.n_proto = skb->protocol;
 308		skb_flow_dissect_tunnel_info(skb, &mask->dissector, &skb_key);
 309		skb_flow_dissect_ct(skb, &mask->dissector, &skb_key,
 310				    fl_ct_info_to_flower_map,
 311				    ARRAY_SIZE(fl_ct_info_to_flower_map));
 312		skb_flow_dissect(skb, &mask->dissector, &skb_key, 0);
 313
 314		fl_set_masked_key(&skb_mkey, &skb_key, mask);
 315
 316		f = fl_lookup(mask, &skb_mkey, &skb_key);
 317		if (f && !tc_skip_sw(f->flags)) {
 318			*res = f->res;
 319			return tcf_exts_exec(skb, &f->exts, res);
 320		}
 321	}
 322	return -1;
 323}
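/* A minimal usage sketch (assuming a hypothetical eth0 with an ingress
 * qdisc already attached):
 *
 *   tc filter add dev eth0 ingress protocol ip flower \
 *           ip_proto tcp dst_port 80 action drop
 *
 * installs one mask (covering ip_proto and dst_port) and one filter hashed
 * under it; fl_classify() then tries each mask in list order and the first
 * matching filter that does not skip software wins.
 */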
 324
 325static int fl_init(struct tcf_proto *tp)
 326{
 327	struct cls_fl_head *head;
 328
 329	head = kzalloc(sizeof(*head), GFP_KERNEL);
 330	if (!head)
 331		return -ENOBUFS;
 332
 333	spin_lock_init(&head->masks_lock);
 334	INIT_LIST_HEAD_RCU(&head->masks);
 335	INIT_LIST_HEAD(&head->hw_filters);
 336	rcu_assign_pointer(tp->root, head);
 337	idr_init(&head->handle_idr);
 338
 339	return rhashtable_init(&head->ht, &mask_ht_params);
 340}
 341
 342static void fl_mask_free(struct fl_flow_mask *mask, bool mask_init_done)
 343{
 344	/* temporary masks don't have their filters list and ht initialized */
 345	if (mask_init_done) {
 346		WARN_ON(!list_empty(&mask->filters));
 347		rhashtable_destroy(&mask->ht);
 348	}
 349	kfree(mask);
 350}
 351
 352static void fl_mask_free_work(struct work_struct *work)
 353{
 354	struct fl_flow_mask *mask = container_of(to_rcu_work(work),
 355						 struct fl_flow_mask, rwork);
 356
 357	fl_mask_free(mask, true);
 358}
 359
 360static void fl_uninit_mask_free_work(struct work_struct *work)
 361{
 362	struct fl_flow_mask *mask = container_of(to_rcu_work(work),
 363						 struct fl_flow_mask, rwork);
 364
 365	fl_mask_free(mask, false);
 366}
 367
 368static bool fl_mask_put(struct cls_fl_head *head, struct fl_flow_mask *mask)
 369{
 370	if (!refcount_dec_and_test(&mask->refcnt))
 371		return false;
 372
 373	rhashtable_remove_fast(&head->ht, &mask->ht_node, mask_ht_params);
 374
 375	spin_lock(&head->masks_lock);
 376	list_del_rcu(&mask->list);
 377	spin_unlock(&head->masks_lock);
 378
 379	tcf_queue_work(&mask->rwork, fl_mask_free_work);
 380
 381	return true;
 382}
 383
 384static struct cls_fl_head *fl_head_dereference(struct tcf_proto *tp)
 385{
 386	/* Flower classifier only changes root pointer during init and destroy.
 387	 * Users must obtain a reference to the tcf_proto instance before calling
 388	 * its API, so the tp->root pointer is protected from a concurrent call to
 389	 * fl_destroy() by reference counting.
 390	 */
 391	return rcu_dereference_raw(tp->root);
 392}
 393
 394static void __fl_destroy_filter(struct cls_fl_filter *f)
 395{
 396	tcf_exts_destroy(&f->exts);
 397	tcf_exts_put_net(&f->exts);
 398	kfree(f);
 399}
 400
 401static void fl_destroy_filter_work(struct work_struct *work)
 402{
 403	struct cls_fl_filter *f = container_of(to_rcu_work(work),
 404					struct cls_fl_filter, rwork);
 405
 406	__fl_destroy_filter(f);
 407}
 408
 409static void fl_hw_destroy_filter(struct tcf_proto *tp, struct cls_fl_filter *f,
 410				 bool rtnl_held, struct netlink_ext_ack *extack)
 411{
 412	struct tcf_block *block = tp->chain->block;
 413	struct flow_cls_offload cls_flower = {};
 414
 415	tc_cls_common_offload_init(&cls_flower.common, tp, f->flags, extack);
 416	cls_flower.command = FLOW_CLS_DESTROY;
 417	cls_flower.cookie = (unsigned long) f;
 418
 419	tc_setup_cb_destroy(block, tp, TC_SETUP_CLSFLOWER, &cls_flower, false,
 420			    &f->flags, &f->in_hw_count, rtnl_held);
 421
 422}
 423
 424static int fl_hw_replace_filter(struct tcf_proto *tp,
 425				struct cls_fl_filter *f, bool rtnl_held,
 426				struct netlink_ext_ack *extack)
 427{
 428	struct tcf_block *block = tp->chain->block;
 429	struct flow_cls_offload cls_flower = {};
 430	bool skip_sw = tc_skip_sw(f->flags);
 431	int err = 0;
 432
 433	cls_flower.rule = flow_rule_alloc(tcf_exts_num_actions(&f->exts));
 434	if (!cls_flower.rule)
 435		return -ENOMEM;
 436
 437	tc_cls_common_offload_init(&cls_flower.common, tp, f->flags, extack);
 438	cls_flower.command = FLOW_CLS_REPLACE;
 439	cls_flower.cookie = (unsigned long) f;
 440	cls_flower.rule->match.dissector = &f->mask->dissector;
 441	cls_flower.rule->match.mask = &f->mask->key;
 442	cls_flower.rule->match.key = &f->mkey;
 443	cls_flower.classid = f->res.classid;
 444
 445	err = tc_setup_flow_action(&cls_flower.rule->action, &f->exts,
 446				   rtnl_held);
 447	if (err) {
 448		kfree(cls_flower.rule);
 449		if (skip_sw) {
 450			NL_SET_ERR_MSG_MOD(extack, "Failed to setup flow action");
 451			return err;
 452		}
 453		return 0;
 454	}
 455
 456	err = tc_setup_cb_add(block, tp, TC_SETUP_CLSFLOWER, &cls_flower,
 457			      skip_sw, &f->flags, &f->in_hw_count, rtnl_held);
 458	tc_cleanup_flow_action(&cls_flower.rule->action);
 459	kfree(cls_flower.rule);
 460
 461	if (err) {
 462		fl_hw_destroy_filter(tp, f, rtnl_held, NULL);
 463		return err;
 464	}
 465
 466	if (skip_sw && !(f->flags & TCA_CLS_FLAGS_IN_HW))
 467		return -EINVAL;
 468
 469	return 0;
 470}
 471
 472static void fl_hw_update_stats(struct tcf_proto *tp, struct cls_fl_filter *f,
 473			       bool rtnl_held)
 474{
 475	struct tcf_block *block = tp->chain->block;
 476	struct flow_cls_offload cls_flower = {};
 477
 478	tc_cls_common_offload_init(&cls_flower.common, tp, f->flags, NULL);
 479	cls_flower.command = FLOW_CLS_STATS;
 480	cls_flower.cookie = (unsigned long) f;
 481	cls_flower.classid = f->res.classid;
 482
 483	tc_setup_cb_call(block, TC_SETUP_CLSFLOWER, &cls_flower, false,
 484			 rtnl_held);
 485
 486	tcf_exts_stats_update(&f->exts, cls_flower.stats.bytes,
 487			      cls_flower.stats.pkts,
 488			      cls_flower.stats.lastused);
 489}
 490
 491static void __fl_put(struct cls_fl_filter *f)
 492{
 493	if (!refcount_dec_and_test(&f->refcnt))
 494		return;
 495
 496	if (tcf_exts_get_net(&f->exts))
 497		tcf_queue_work(&f->rwork, fl_destroy_filter_work);
 498	else
 499		__fl_destroy_filter(f);
 500}
 501
 502static struct cls_fl_filter *__fl_get(struct cls_fl_head *head, u32 handle)
 503{
 504	struct cls_fl_filter *f;
 505
 506	rcu_read_lock();
 507	f = idr_find(&head->handle_idr, handle);
 508	if (f && !refcount_inc_not_zero(&f->refcnt))
 509		f = NULL;
 510	rcu_read_unlock();
 511
 512	return f;
 513}
 514
 515static int __fl_delete(struct tcf_proto *tp, struct cls_fl_filter *f,
 516		       bool *last, bool rtnl_held,
 517		       struct netlink_ext_ack *extack)
 518{
 519	struct cls_fl_head *head = fl_head_dereference(tp);
 520
 521	*last = false;
 522
 523	spin_lock(&tp->lock);
 524	if (f->deleted) {
 525		spin_unlock(&tp->lock);
 526		return -ENOENT;
 527	}
 528
 529	f->deleted = true;
 530	rhashtable_remove_fast(&f->mask->ht, &f->ht_node,
 531			       f->mask->filter_ht_params);
 532	idr_remove(&head->handle_idr, f->handle);
 533	list_del_rcu(&f->list);
 534	spin_unlock(&tp->lock);
 535
 536	*last = fl_mask_put(head, f->mask);
 537	if (!tc_skip_hw(f->flags))
 538		fl_hw_destroy_filter(tp, f, rtnl_held, extack);
 539	tcf_unbind_filter(tp, &f->res);
 540	__fl_put(f);
 541
 542	return 0;
 543}
 544
 545static void fl_destroy_sleepable(struct work_struct *work)
 546{
 547	struct cls_fl_head *head = container_of(to_rcu_work(work),
 548						struct cls_fl_head,
 549						rwork);
 550
 551	rhashtable_destroy(&head->ht);
 552	kfree(head);
 553	module_put(THIS_MODULE);
 554}
 555
 556static void fl_destroy(struct tcf_proto *tp, bool rtnl_held,
 557		       struct netlink_ext_ack *extack)
 558{
 559	struct cls_fl_head *head = fl_head_dereference(tp);
 560	struct fl_flow_mask *mask, *next_mask;
 561	struct cls_fl_filter *f, *next;
 562	bool last;
 563
 564	list_for_each_entry_safe(mask, next_mask, &head->masks, list) {
 565		list_for_each_entry_safe(f, next, &mask->filters, list) {
 566			__fl_delete(tp, f, &last, rtnl_held, extack);
 567			if (last)
 568				break;
 569		}
 570	}
 571	idr_destroy(&head->handle_idr);
 572
 573	__module_get(THIS_MODULE);
 574	tcf_queue_work(&head->rwork, fl_destroy_sleepable);
 575}
 576
 577static void fl_put(struct tcf_proto *tp, void *arg)
 578{
 579	struct cls_fl_filter *f = arg;
 580
 581	__fl_put(f);
 582}
 583
 584static void *fl_get(struct tcf_proto *tp, u32 handle)
 585{
 586	struct cls_fl_head *head = fl_head_dereference(tp);
 587
 588	return __fl_get(head, handle);
 589}
 590
 591static const struct nla_policy fl_policy[TCA_FLOWER_MAX + 1] = {
 592	[TCA_FLOWER_UNSPEC]		= { .type = NLA_UNSPEC },
 593	[TCA_FLOWER_CLASSID]		= { .type = NLA_U32 },
 594	[TCA_FLOWER_INDEV]		= { .type = NLA_STRING,
 595					    .len = IFNAMSIZ },
 596	[TCA_FLOWER_KEY_ETH_DST]	= { .len = ETH_ALEN },
 597	[TCA_FLOWER_KEY_ETH_DST_MASK]	= { .len = ETH_ALEN },
 598	[TCA_FLOWER_KEY_ETH_SRC]	= { .len = ETH_ALEN },
 599	[TCA_FLOWER_KEY_ETH_SRC_MASK]	= { .len = ETH_ALEN },
 600	[TCA_FLOWER_KEY_ETH_TYPE]	= { .type = NLA_U16 },
 601	[TCA_FLOWER_KEY_IP_PROTO]	= { .type = NLA_U8 },
 602	[TCA_FLOWER_KEY_IPV4_SRC]	= { .type = NLA_U32 },
 603	[TCA_FLOWER_KEY_IPV4_SRC_MASK]	= { .type = NLA_U32 },
 604	[TCA_FLOWER_KEY_IPV4_DST]	= { .type = NLA_U32 },
 605	[TCA_FLOWER_KEY_IPV4_DST_MASK]	= { .type = NLA_U32 },
 606	[TCA_FLOWER_KEY_IPV6_SRC]	= { .len = sizeof(struct in6_addr) },
 607	[TCA_FLOWER_KEY_IPV6_SRC_MASK]	= { .len = sizeof(struct in6_addr) },
 608	[TCA_FLOWER_KEY_IPV6_DST]	= { .len = sizeof(struct in6_addr) },
 609	[TCA_FLOWER_KEY_IPV6_DST_MASK]	= { .len = sizeof(struct in6_addr) },
 610	[TCA_FLOWER_KEY_TCP_SRC]	= { .type = NLA_U16 },
 611	[TCA_FLOWER_KEY_TCP_DST]	= { .type = NLA_U16 },
 612	[TCA_FLOWER_KEY_UDP_SRC]	= { .type = NLA_U16 },
 613	[TCA_FLOWER_KEY_UDP_DST]	= { .type = NLA_U16 },
 614	[TCA_FLOWER_KEY_VLAN_ID]	= { .type = NLA_U16 },
 615	[TCA_FLOWER_KEY_VLAN_PRIO]	= { .type = NLA_U8 },
 616	[TCA_FLOWER_KEY_VLAN_ETH_TYPE]	= { .type = NLA_U16 },
 617	[TCA_FLOWER_KEY_ENC_KEY_ID]	= { .type = NLA_U32 },
 618	[TCA_FLOWER_KEY_ENC_IPV4_SRC]	= { .type = NLA_U32 },
 619	[TCA_FLOWER_KEY_ENC_IPV4_SRC_MASK] = { .type = NLA_U32 },
 620	[TCA_FLOWER_KEY_ENC_IPV4_DST]	= { .type = NLA_U32 },
 621	[TCA_FLOWER_KEY_ENC_IPV4_DST_MASK] = { .type = NLA_U32 },
 622	[TCA_FLOWER_KEY_ENC_IPV6_SRC]	= { .len = sizeof(struct in6_addr) },
 623	[TCA_FLOWER_KEY_ENC_IPV6_SRC_MASK] = { .len = sizeof(struct in6_addr) },
 624	[TCA_FLOWER_KEY_ENC_IPV6_DST]	= { .len = sizeof(struct in6_addr) },
 625	[TCA_FLOWER_KEY_ENC_IPV6_DST_MASK] = { .len = sizeof(struct in6_addr) },
 626	[TCA_FLOWER_KEY_TCP_SRC_MASK]	= { .type = NLA_U16 },
 627	[TCA_FLOWER_KEY_TCP_DST_MASK]	= { .type = NLA_U16 },
 628	[TCA_FLOWER_KEY_UDP_SRC_MASK]	= { .type = NLA_U16 },
 629	[TCA_FLOWER_KEY_UDP_DST_MASK]	= { .type = NLA_U16 },
 630	[TCA_FLOWER_KEY_SCTP_SRC_MASK]	= { .type = NLA_U16 },
 631	[TCA_FLOWER_KEY_SCTP_DST_MASK]	= { .type = NLA_U16 },
 632	[TCA_FLOWER_KEY_SCTP_SRC]	= { .type = NLA_U16 },
 633	[TCA_FLOWER_KEY_SCTP_DST]	= { .type = NLA_U16 },
 634	[TCA_FLOWER_KEY_ENC_UDP_SRC_PORT]	= { .type = NLA_U16 },
 635	[TCA_FLOWER_KEY_ENC_UDP_SRC_PORT_MASK]	= { .type = NLA_U16 },
 636	[TCA_FLOWER_KEY_ENC_UDP_DST_PORT]	= { .type = NLA_U16 },
 637	[TCA_FLOWER_KEY_ENC_UDP_DST_PORT_MASK]	= { .type = NLA_U16 },
 638	[TCA_FLOWER_KEY_FLAGS]		= { .type = NLA_U32 },
 639	[TCA_FLOWER_KEY_FLAGS_MASK]	= { .type = NLA_U32 },
 640	[TCA_FLOWER_KEY_ICMPV4_TYPE]	= { .type = NLA_U8 },
 641	[TCA_FLOWER_KEY_ICMPV4_TYPE_MASK] = { .type = NLA_U8 },
 642	[TCA_FLOWER_KEY_ICMPV4_CODE]	= { .type = NLA_U8 },
 643	[TCA_FLOWER_KEY_ICMPV4_CODE_MASK] = { .type = NLA_U8 },
 644	[TCA_FLOWER_KEY_ICMPV6_TYPE]	= { .type = NLA_U8 },
 645	[TCA_FLOWER_KEY_ICMPV6_TYPE_MASK] = { .type = NLA_U8 },
 646	[TCA_FLOWER_KEY_ICMPV6_CODE]	= { .type = NLA_U8 },
 647	[TCA_FLOWER_KEY_ICMPV6_CODE_MASK] = { .type = NLA_U8 },
 648	[TCA_FLOWER_KEY_ARP_SIP]	= { .type = NLA_U32 },
 649	[TCA_FLOWER_KEY_ARP_SIP_MASK]	= { .type = NLA_U32 },
 650	[TCA_FLOWER_KEY_ARP_TIP]	= { .type = NLA_U32 },
 651	[TCA_FLOWER_KEY_ARP_TIP_MASK]	= { .type = NLA_U32 },
 652	[TCA_FLOWER_KEY_ARP_OP]		= { .type = NLA_U8 },
 653	[TCA_FLOWER_KEY_ARP_OP_MASK]	= { .type = NLA_U8 },
 654	[TCA_FLOWER_KEY_ARP_SHA]	= { .len = ETH_ALEN },
 655	[TCA_FLOWER_KEY_ARP_SHA_MASK]	= { .len = ETH_ALEN },
 656	[TCA_FLOWER_KEY_ARP_THA]	= { .len = ETH_ALEN },
 657	[TCA_FLOWER_KEY_ARP_THA_MASK]	= { .len = ETH_ALEN },
 658	[TCA_FLOWER_KEY_MPLS_TTL]	= { .type = NLA_U8 },
 659	[TCA_FLOWER_KEY_MPLS_BOS]	= { .type = NLA_U8 },
 660	[TCA_FLOWER_KEY_MPLS_TC]	= { .type = NLA_U8 },
 661	[TCA_FLOWER_KEY_MPLS_LABEL]	= { .type = NLA_U32 },
 662	[TCA_FLOWER_KEY_TCP_FLAGS]	= { .type = NLA_U16 },
 663	[TCA_FLOWER_KEY_TCP_FLAGS_MASK]	= { .type = NLA_U16 },
 664	[TCA_FLOWER_KEY_IP_TOS]		= { .type = NLA_U8 },
 665	[TCA_FLOWER_KEY_IP_TOS_MASK]	= { .type = NLA_U8 },
 666	[TCA_FLOWER_KEY_IP_TTL]		= { .type = NLA_U8 },
 667	[TCA_FLOWER_KEY_IP_TTL_MASK]	= { .type = NLA_U8 },
 668	[TCA_FLOWER_KEY_CVLAN_ID]	= { .type = NLA_U16 },
 669	[TCA_FLOWER_KEY_CVLAN_PRIO]	= { .type = NLA_U8 },
 670	[TCA_FLOWER_KEY_CVLAN_ETH_TYPE]	= { .type = NLA_U16 },
 671	[TCA_FLOWER_KEY_ENC_IP_TOS]	= { .type = NLA_U8 },
 672	[TCA_FLOWER_KEY_ENC_IP_TOS_MASK] = { .type = NLA_U8 },
 673	[TCA_FLOWER_KEY_ENC_IP_TTL]	 = { .type = NLA_U8 },
 674	[TCA_FLOWER_KEY_ENC_IP_TTL_MASK] = { .type = NLA_U8 },
 675	[TCA_FLOWER_KEY_ENC_OPTS]	= { .type = NLA_NESTED },
 676	[TCA_FLOWER_KEY_ENC_OPTS_MASK]	= { .type = NLA_NESTED },
 677	[TCA_FLOWER_KEY_CT_STATE]	= { .type = NLA_U16 },
 678	[TCA_FLOWER_KEY_CT_STATE_MASK]	= { .type = NLA_U16 },
 679	[TCA_FLOWER_KEY_CT_ZONE]	= { .type = NLA_U16 },
 680	[TCA_FLOWER_KEY_CT_ZONE_MASK]	= { .type = NLA_U16 },
 681	[TCA_FLOWER_KEY_CT_MARK]	= { .type = NLA_U32 },
 682	[TCA_FLOWER_KEY_CT_MARK_MASK]	= { .type = NLA_U32 },
 683	[TCA_FLOWER_KEY_CT_LABELS]	= { .type = NLA_BINARY,
 684					    .len = 128 / BITS_PER_BYTE },
 685	[TCA_FLOWER_KEY_CT_LABELS_MASK]	= { .type = NLA_BINARY,
 686					    .len = 128 / BITS_PER_BYTE },
 687};
 688
 689static const struct nla_policy
 690enc_opts_policy[TCA_FLOWER_KEY_ENC_OPTS_MAX + 1] = {
 691	[TCA_FLOWER_KEY_ENC_OPTS_GENEVE]        = { .type = NLA_NESTED },
 692};
 693
 694static const struct nla_policy
 695geneve_opt_policy[TCA_FLOWER_KEY_ENC_OPT_GENEVE_MAX + 1] = {
 696	[TCA_FLOWER_KEY_ENC_OPT_GENEVE_CLASS]      = { .type = NLA_U16 },
 697	[TCA_FLOWER_KEY_ENC_OPT_GENEVE_TYPE]       = { .type = NLA_U8 },
 698	[TCA_FLOWER_KEY_ENC_OPT_GENEVE_DATA]       = { .type = NLA_BINARY,
 699						       .len = 128 },
 700};
 701
 702static void fl_set_key_val(struct nlattr **tb,
 703			   void *val, int val_type,
 704			   void *mask, int mask_type, int len)
 705{
 706	if (!tb[val_type])
 707		return;
 708	nla_memcpy(val, tb[val_type], len);
 709	if (mask_type == TCA_FLOWER_UNSPEC || !tb[mask_type])
 710		memset(mask, 0xff, len);
 711	else
 712		nla_memcpy(mask, tb[mask_type], len);
 713}
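/* A sketch with hypothetical values: TCA_FLOWER_KEY_IPV4_DST = 10.0.0.1
 * with no TCA_FLOWER_KEY_IPV4_DST_MASK attribute gets an all-0xff mask,
 * i.e. a /32 exact match; supplying a 255.255.255.0 mask instead widens
 * the match to the whole 10.0.0.0/24 prefix.
 */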
 714
 715static int fl_set_key_port_range(struct nlattr **tb, struct fl_flow_key *key,
 716				 struct fl_flow_key *mask)
 717{
 718	fl_set_key_val(tb, &key->tp_range.tp_min.dst,
 719		       TCA_FLOWER_KEY_PORT_DST_MIN, &mask->tp_range.tp_min.dst,
 720		       TCA_FLOWER_UNSPEC, sizeof(key->tp_range.tp_min.dst));
 721	fl_set_key_val(tb, &key->tp_range.tp_max.dst,
 722		       TCA_FLOWER_KEY_PORT_DST_MAX, &mask->tp_range.tp_max.dst,
 723		       TCA_FLOWER_UNSPEC, sizeof(key->tp_range.tp_max.dst));
 724	fl_set_key_val(tb, &key->tp_range.tp_min.src,
 725		       TCA_FLOWER_KEY_PORT_SRC_MIN, &mask->tp_range.tp_min.src,
 726		       TCA_FLOWER_UNSPEC, sizeof(key->tp_range.tp_min.src));
 727	fl_set_key_val(tb, &key->tp_range.tp_max.src,
 728		       TCA_FLOWER_KEY_PORT_SRC_MAX, &mask->tp_range.tp_max.src,
 729		       TCA_FLOWER_UNSPEC, sizeof(key->tp_range.tp_max.src));
 730
 731	if ((mask->tp_range.tp_min.dst && mask->tp_range.tp_max.dst &&
 732	     htons(key->tp_range.tp_max.dst) <= htons(key->tp_range.tp_min.dst)) ||
 733	    (mask->tp_range.tp_min.src && mask->tp_range.tp_max.src &&
 734	     htons(key->tp_range.tp_max.src) <= htons(key->tp_range.tp_min.src)))
 735		return -EINVAL;
 736
 737	return 0;
 738}
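/* A sketch with hypothetical values: dst_port range 1000-2000 is accepted,
 * while 2000-1000 or the degenerate 1500-1500 fails the htons() comparison
 * above and the change is rejected with -EINVAL.
 */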
 739
 740static int fl_set_key_mpls(struct nlattr **tb,
 741			   struct flow_dissector_key_mpls *key_val,
 742			   struct flow_dissector_key_mpls *key_mask)
 743{
 744	if (tb[TCA_FLOWER_KEY_MPLS_TTL]) {
 745		key_val->mpls_ttl = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_TTL]);
 746		key_mask->mpls_ttl = MPLS_TTL_MASK;
 747	}
 748	if (tb[TCA_FLOWER_KEY_MPLS_BOS]) {
 749		u8 bos = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_BOS]);
 750
 751		if (bos & ~MPLS_BOS_MASK)
 752			return -EINVAL;
 753		key_val->mpls_bos = bos;
 754		key_mask->mpls_bos = MPLS_BOS_MASK;
 755	}
 756	if (tb[TCA_FLOWER_KEY_MPLS_TC]) {
 757		u8 tc = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_TC]);
 758
 759		if (tc & ~MPLS_TC_MASK)
 760			return -EINVAL;
 761		key_val->mpls_tc = tc;
 762		key_mask->mpls_tc = MPLS_TC_MASK;
 763	}
 764	if (tb[TCA_FLOWER_KEY_MPLS_LABEL]) {
 765		u32 label = nla_get_u32(tb[TCA_FLOWER_KEY_MPLS_LABEL]);
 766
 767		if (label & ~MPLS_LABEL_MASK)
 768			return -EINVAL;
 769		key_val->mpls_label = label;
 770		key_mask->mpls_label = MPLS_LABEL_MASK;
 771	}
 772	return 0;
 773}
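/* A sketch with hypothetical values: TCA_FLOWER_KEY_MPLS_LABEL 1048575
 * (0xfffff, the largest 20-bit label) is accepted, while 1048576 has bits
 * outside MPLS_LABEL_MASK and fails with -EINVAL.
 */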
 774
 775static void fl_set_key_vlan(struct nlattr **tb,
 776			    __be16 ethertype,
 777			    int vlan_id_key, int vlan_prio_key,
 778			    struct flow_dissector_key_vlan *key_val,
 779			    struct flow_dissector_key_vlan *key_mask)
 780{
 781#define VLAN_PRIORITY_MASK	0x7
 782
 783	if (tb[vlan_id_key]) {
 784		key_val->vlan_id =
 785			nla_get_u16(tb[vlan_id_key]) & VLAN_VID_MASK;
 786		key_mask->vlan_id = VLAN_VID_MASK;
 787	}
 788	if (tb[vlan_prio_key]) {
 789		key_val->vlan_priority =
 790			nla_get_u8(tb[vlan_prio_key]) &
 791			VLAN_PRIORITY_MASK;
 792		key_mask->vlan_priority = VLAN_PRIORITY_MASK;
 793	}
 794	key_val->vlan_tpid = ethertype;
 795	key_mask->vlan_tpid = cpu_to_be16(~0);
 796}
 797
 798static void fl_set_key_flag(u32 flower_key, u32 flower_mask,
 799			    u32 *dissector_key, u32 *dissector_mask,
 800			    u32 flower_flag_bit, u32 dissector_flag_bit)
 801{
 802	if (flower_mask & flower_flag_bit) {
 803		*dissector_mask |= dissector_flag_bit;
 804		if (flower_key & flower_flag_bit)
 805			*dissector_key |= dissector_flag_bit;
 806	}
 807}
 808
 809static int fl_set_key_flags(struct nlattr **tb,
 810			    u32 *flags_key, u32 *flags_mask)
 811{
 812	u32 key, mask;
 813
 814	/* mask is mandatory for flags */
 815	if (!tb[TCA_FLOWER_KEY_FLAGS_MASK])
 816		return -EINVAL;
 817
 818	key = be32_to_cpu(nla_get_u32(tb[TCA_FLOWER_KEY_FLAGS]));
 819	mask = be32_to_cpu(nla_get_u32(tb[TCA_FLOWER_KEY_FLAGS_MASK]));
 820
 821	*flags_key  = 0;
 822	*flags_mask = 0;
 823
 824	fl_set_key_flag(key, mask, flags_key, flags_mask,
 825			TCA_FLOWER_KEY_FLAGS_IS_FRAGMENT, FLOW_DIS_IS_FRAGMENT);
 826	fl_set_key_flag(key, mask, flags_key, flags_mask,
 827			TCA_FLOWER_KEY_FLAGS_FRAG_IS_FIRST,
 828			FLOW_DIS_FIRST_FRAG);
 829
 830	return 0;
 831}
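/* A sketch with hypothetical values: key = 0x1 with mask = 0x3 matches
 * packets that are fragments (IS_FRAGMENT set) but not the first fragment
 * (FRAG_IS_FIRST masked yet clear), i.e. non-first fragments only.
 */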
 832
 833static void fl_set_key_ip(struct nlattr **tb, bool encap,
 834			  struct flow_dissector_key_ip *key,
 835			  struct flow_dissector_key_ip *mask)
 836{
 837	int tos_key = encap ? TCA_FLOWER_KEY_ENC_IP_TOS : TCA_FLOWER_KEY_IP_TOS;
 838	int ttl_key = encap ? TCA_FLOWER_KEY_ENC_IP_TTL : TCA_FLOWER_KEY_IP_TTL;
 839	int tos_mask = encap ? TCA_FLOWER_KEY_ENC_IP_TOS_MASK : TCA_FLOWER_KEY_IP_TOS_MASK;
 840	int ttl_mask = encap ? TCA_FLOWER_KEY_ENC_IP_TTL_MASK : TCA_FLOWER_KEY_IP_TTL_MASK;
 841
 842	fl_set_key_val(tb, &key->tos, tos_key, &mask->tos, tos_mask, sizeof(key->tos));
 843	fl_set_key_val(tb, &key->ttl, ttl_key, &mask->ttl, ttl_mask, sizeof(key->ttl));
 844}
 845
 846static int fl_set_geneve_opt(const struct nlattr *nla, struct fl_flow_key *key,
 847			     int depth, int option_len,
 848			     struct netlink_ext_ack *extack)
 849{
 850	struct nlattr *tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_MAX + 1];
 851	struct nlattr *class = NULL, *type = NULL, *data = NULL;
 852	struct geneve_opt *opt;
 853	int err, data_len = 0;
 854
 855	if (option_len > sizeof(struct geneve_opt))
 856		data_len = option_len - sizeof(struct geneve_opt);
 857
 858	opt = (struct geneve_opt *)&key->enc_opts.data[key->enc_opts.len];
 859	memset(opt, 0xff, option_len);
 860	opt->length = data_len / 4;
 861	opt->r1 = 0;
 862	opt->r2 = 0;
 863	opt->r3 = 0;
 864
 865	/* If no mask has been provided we assume an exact match. */
 866	if (!depth)
 867		return sizeof(struct geneve_opt) + data_len;
 868
 869	if (nla_type(nla) != TCA_FLOWER_KEY_ENC_OPTS_GENEVE) {
 870		NL_SET_ERR_MSG(extack, "Non-geneve option type for mask");
 871		return -EINVAL;
 872	}
 873
 874	err = nla_parse_nested_deprecated(tb,
 875					  TCA_FLOWER_KEY_ENC_OPT_GENEVE_MAX,
 876					  nla, geneve_opt_policy, extack);
 877	if (err < 0)
 878		return err;
 879
 880	/* We are not allowed to omit any of CLASS, TYPE or DATA
 881	 * fields from the key.
 882	 */
 883	if (!option_len &&
 884	    (!tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_CLASS] ||
 885	     !tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_TYPE] ||
 886	     !tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_DATA])) {
 887		NL_SET_ERR_MSG(extack, "Missing tunnel key geneve option class, type or data");
 888		return -EINVAL;
 889	}
 890
 891	/* Omitting any of CLASS, TYPE or DATA fields is allowed
 892	 * for the mask.
 893	 */
 894	if (tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_DATA]) {
 895		int new_len = key->enc_opts.len;
 896
 897		data = tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_DATA];
 898		data_len = nla_len(data);
 899		if (data_len < 4) {
 900			NL_SET_ERR_MSG(extack, "Tunnel key geneve option data is less than 4 bytes long");
 901			return -ERANGE;
 902		}
 903		if (data_len % 4) {
 904			NL_SET_ERR_MSG(extack, "Tunnel key geneve option data is not a multiple of 4 bytes long");
 905			return -ERANGE;
 906		}
 907
 908		new_len += sizeof(struct geneve_opt) + data_len;
 909		BUILD_BUG_ON(FLOW_DIS_TUN_OPTS_MAX != IP_TUNNEL_OPTS_MAX);
 910		if (new_len > FLOW_DIS_TUN_OPTS_MAX) {
 911			NL_SET_ERR_MSG(extack, "Tunnel options exceed max size");
 912			return -ERANGE;
 913		}
 914		opt->length = data_len / 4;
 915		memcpy(opt->opt_data, nla_data(data), data_len);
 916	}
 917
 918	if (tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_CLASS]) {
 919		class = tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_CLASS];
 920		opt->opt_class = nla_get_be16(class);
 921	}
 922
 923	if (tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_TYPE]) {
 924		type = tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_TYPE];
 925		opt->type = nla_get_u8(type);
 926	}
 927
 928	return sizeof(struct geneve_opt) + data_len;
 929}
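/* A sketch with hypothetical values: one geneve option with class 0x0102,
 * type 0x80 and 4 bytes of data consumes sizeof(struct geneve_opt) + 4
 * bytes of enc_opts.data and sets opt->length = 1, since length is counted
 * in 4-byte words.
 */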
 930
 931static int fl_set_enc_opt(struct nlattr **tb, struct fl_flow_key *key,
 932			  struct fl_flow_key *mask,
 933			  struct netlink_ext_ack *extack)
 934{
 935	const struct nlattr *nla_enc_key, *nla_opt_key, *nla_opt_msk = NULL;
 936	int err, option_len, key_depth, msk_depth = 0;
 937
 938	err = nla_validate_nested_deprecated(tb[TCA_FLOWER_KEY_ENC_OPTS],
 939					     TCA_FLOWER_KEY_ENC_OPTS_MAX,
 940					     enc_opts_policy, extack);
 941	if (err)
 942		return err;
 943
 944	nla_enc_key = nla_data(tb[TCA_FLOWER_KEY_ENC_OPTS]);
 945
 946	if (tb[TCA_FLOWER_KEY_ENC_OPTS_MASK]) {
 947		err = nla_validate_nested_deprecated(tb[TCA_FLOWER_KEY_ENC_OPTS_MASK],
 948						     TCA_FLOWER_KEY_ENC_OPTS_MAX,
 949						     enc_opts_policy, extack);
 950		if (err)
 951			return err;
 952
 953		nla_opt_msk = nla_data(tb[TCA_FLOWER_KEY_ENC_OPTS_MASK]);
 954		msk_depth = nla_len(tb[TCA_FLOWER_KEY_ENC_OPTS_MASK]);
 955	}
 956
 957	nla_for_each_attr(nla_opt_key, nla_enc_key,
 958			  nla_len(tb[TCA_FLOWER_KEY_ENC_OPTS]), key_depth) {
 959		switch (nla_type(nla_opt_key)) {
 960		case TCA_FLOWER_KEY_ENC_OPTS_GENEVE:
 961			option_len = 0;
 962			key->enc_opts.dst_opt_type = TUNNEL_GENEVE_OPT;
 963			option_len = fl_set_geneve_opt(nla_opt_key, key,
 964						       key_depth, option_len,
 965						       extack);
 966			if (option_len < 0)
 967				return option_len;
 968
 969			key->enc_opts.len += option_len;
 970			/* Walk the mask in lockstep so that the key and mask
 971			 * attribute lengths can be verified to match.
 972			 */
 973			mask->enc_opts.dst_opt_type = TUNNEL_GENEVE_OPT;
 974			option_len = fl_set_geneve_opt(nla_opt_msk, mask,
 975						       msk_depth, option_len,
 976						       extack);
 977			if (option_len < 0)
 978				return option_len;
 979
 980			mask->enc_opts.len += option_len;
 981			if (key->enc_opts.len != mask->enc_opts.len) {
 982				NL_SET_ERR_MSG(extack, "Key and mask are misaligned");
 983				return -EINVAL;
 984			}
 985
 986			if (msk_depth)
 987				nla_opt_msk = nla_next(nla_opt_msk, &msk_depth);
 988			break;
 989		default:
 990			NL_SET_ERR_MSG(extack, "Unknown tunnel option type");
 991			return -EINVAL;
 992		}
 993	}
 994
 995	return 0;
 996}
 997
 998static int fl_set_key_ct(struct nlattr **tb,
 999			 struct flow_dissector_key_ct *key,
1000			 struct flow_dissector_key_ct *mask,
1001			 struct netlink_ext_ack *extack)
1002{
1003	if (tb[TCA_FLOWER_KEY_CT_STATE]) {
1004		if (!IS_ENABLED(CONFIG_NF_CONNTRACK)) {
1005			NL_SET_ERR_MSG(extack, "Conntrack isn't enabled");
1006			return -EOPNOTSUPP;
1007		}
1008		fl_set_key_val(tb, &key->ct_state, TCA_FLOWER_KEY_CT_STATE,
1009			       &mask->ct_state, TCA_FLOWER_KEY_CT_STATE_MASK,
1010			       sizeof(key->ct_state));
1011	}
1012	if (tb[TCA_FLOWER_KEY_CT_ZONE]) {
1013		if (!IS_ENABLED(CONFIG_NF_CONNTRACK_ZONES)) {
1014			NL_SET_ERR_MSG(extack, "Conntrack zones aren't enabled");
1015			return -EOPNOTSUPP;
1016		}
1017		fl_set_key_val(tb, &key->ct_zone, TCA_FLOWER_KEY_CT_ZONE,
1018			       &mask->ct_zone, TCA_FLOWER_KEY_CT_ZONE_MASK,
1019			       sizeof(key->ct_zone));
1020	}
1021	if (tb[TCA_FLOWER_KEY_CT_MARK]) {
1022		if (!IS_ENABLED(CONFIG_NF_CONNTRACK_MARK)) {
1023			NL_SET_ERR_MSG(extack, "Conntrack mark isn't enabled");
1024			return -EOPNOTSUPP;
1025		}
1026		fl_set_key_val(tb, &key->ct_mark, TCA_FLOWER_KEY_CT_MARK,
1027			       &mask->ct_mark, TCA_FLOWER_KEY_CT_MARK_MASK,
1028			       sizeof(key->ct_mark));
1029	}
1030	if (tb[TCA_FLOWER_KEY_CT_LABELS]) {
1031		if (!IS_ENABLED(CONFIG_NF_CONNTRACK_LABELS)) {
1032			NL_SET_ERR_MSG(extack, "Conntrack labels aren't enabled");
1033			return -EOPNOTSUPP;
1034		}
1035		fl_set_key_val(tb, key->ct_labels, TCA_FLOWER_KEY_CT_LABELS,
1036			       mask->ct_labels, TCA_FLOWER_KEY_CT_LABELS_MASK,
1037			       sizeof(key->ct_labels));
1038	}
1039
1040	return 0;
1041}
1042
1043static int fl_set_key(struct net *net, struct nlattr **tb,
1044		      struct fl_flow_key *key, struct fl_flow_key *mask,
1045		      struct netlink_ext_ack *extack)
1046{
1047	__be16 ethertype;
1048	int ret = 0;
1049
1050	if (tb[TCA_FLOWER_INDEV]) {
1051		int err = tcf_change_indev(net, tb[TCA_FLOWER_INDEV], extack);
1052		if (err < 0)
1053			return err;
1054		key->meta.ingress_ifindex = err;
1055		mask->meta.ingress_ifindex = 0xffffffff;
1056	}
1057
1058	fl_set_key_val(tb, key->eth.dst, TCA_FLOWER_KEY_ETH_DST,
1059		       mask->eth.dst, TCA_FLOWER_KEY_ETH_DST_MASK,
1060		       sizeof(key->eth.dst));
1061	fl_set_key_val(tb, key->eth.src, TCA_FLOWER_KEY_ETH_SRC,
1062		       mask->eth.src, TCA_FLOWER_KEY_ETH_SRC_MASK,
1063		       sizeof(key->eth.src));
1064
1065	if (tb[TCA_FLOWER_KEY_ETH_TYPE]) {
1066		ethertype = nla_get_be16(tb[TCA_FLOWER_KEY_ETH_TYPE]);
1067
1068		if (eth_type_vlan(ethertype)) {
1069			fl_set_key_vlan(tb, ethertype, TCA_FLOWER_KEY_VLAN_ID,
1070					TCA_FLOWER_KEY_VLAN_PRIO, &key->vlan,
1071					&mask->vlan);
1072
1073			if (tb[TCA_FLOWER_KEY_VLAN_ETH_TYPE]) {
1074				ethertype = nla_get_be16(tb[TCA_FLOWER_KEY_VLAN_ETH_TYPE]);
1075				if (eth_type_vlan(ethertype)) {
1076					fl_set_key_vlan(tb, ethertype,
1077							TCA_FLOWER_KEY_CVLAN_ID,
1078							TCA_FLOWER_KEY_CVLAN_PRIO,
1079							&key->cvlan, &mask->cvlan);
1080					fl_set_key_val(tb, &key->basic.n_proto,
1081						       TCA_FLOWER_KEY_CVLAN_ETH_TYPE,
1082						       &mask->basic.n_proto,
1083						       TCA_FLOWER_UNSPEC,
1084						       sizeof(key->basic.n_proto));
1085				} else {
1086					key->basic.n_proto = ethertype;
1087					mask->basic.n_proto = cpu_to_be16(~0);
1088				}
1089			}
1090		} else {
1091			key->basic.n_proto = ethertype;
1092			mask->basic.n_proto = cpu_to_be16(~0);
1093		}
1094	}
1095
1096	if (key->basic.n_proto == htons(ETH_P_IP) ||
1097	    key->basic.n_proto == htons(ETH_P_IPV6)) {
1098		fl_set_key_val(tb, &key->basic.ip_proto, TCA_FLOWER_KEY_IP_PROTO,
1099			       &mask->basic.ip_proto, TCA_FLOWER_UNSPEC,
1100			       sizeof(key->basic.ip_proto));
1101		fl_set_key_ip(tb, false, &key->ip, &mask->ip);
1102	}
1103
1104	if (tb[TCA_FLOWER_KEY_IPV4_SRC] || tb[TCA_FLOWER_KEY_IPV4_DST]) {
1105		key->control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
1106		mask->control.addr_type = ~0;
1107		fl_set_key_val(tb, &key->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC,
1108			       &mask->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC_MASK,
1109			       sizeof(key->ipv4.src));
1110		fl_set_key_val(tb, &key->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST,
1111			       &mask->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST_MASK,
1112			       sizeof(key->ipv4.dst));
1113	} else if (tb[TCA_FLOWER_KEY_IPV6_SRC] || tb[TCA_FLOWER_KEY_IPV6_DST]) {
1114		key->control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
1115		mask->control.addr_type = ~0;
1116		fl_set_key_val(tb, &key->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC,
1117			       &mask->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC_MASK,
1118			       sizeof(key->ipv6.src));
1119		fl_set_key_val(tb, &key->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST,
1120			       &mask->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST_MASK,
1121			       sizeof(key->ipv6.dst));
1122	}
1123
1124	if (key->basic.ip_proto == IPPROTO_TCP) {
1125		fl_set_key_val(tb, &key->tp.src, TCA_FLOWER_KEY_TCP_SRC,
1126			       &mask->tp.src, TCA_FLOWER_KEY_TCP_SRC_MASK,
1127			       sizeof(key->tp.src));
1128		fl_set_key_val(tb, &key->tp.dst, TCA_FLOWER_KEY_TCP_DST,
1129			       &mask->tp.dst, TCA_FLOWER_KEY_TCP_DST_MASK,
1130			       sizeof(key->tp.dst));
1131		fl_set_key_val(tb, &key->tcp.flags, TCA_FLOWER_KEY_TCP_FLAGS,
1132			       &mask->tcp.flags, TCA_FLOWER_KEY_TCP_FLAGS_MASK,
1133			       sizeof(key->tcp.flags));
1134	} else if (key->basic.ip_proto == IPPROTO_UDP) {
1135		fl_set_key_val(tb, &key->tp.src, TCA_FLOWER_KEY_UDP_SRC,
1136			       &mask->tp.src, TCA_FLOWER_KEY_UDP_SRC_MASK,
1137			       sizeof(key->tp.src));
1138		fl_set_key_val(tb, &key->tp.dst, TCA_FLOWER_KEY_UDP_DST,
1139			       &mask->tp.dst, TCA_FLOWER_KEY_UDP_DST_MASK,
1140			       sizeof(key->tp.dst));
1141	} else if (key->basic.ip_proto == IPPROTO_SCTP) {
1142		fl_set_key_val(tb, &key->tp.src, TCA_FLOWER_KEY_SCTP_SRC,
1143			       &mask->tp.src, TCA_FLOWER_KEY_SCTP_SRC_MASK,
1144			       sizeof(key->tp.src));
1145		fl_set_key_val(tb, &key->tp.dst, TCA_FLOWER_KEY_SCTP_DST,
1146			       &mask->tp.dst, TCA_FLOWER_KEY_SCTP_DST_MASK,
1147			       sizeof(key->tp.dst));
1148	} else if (key->basic.n_proto == htons(ETH_P_IP) &&
1149		   key->basic.ip_proto == IPPROTO_ICMP) {
1150		fl_set_key_val(tb, &key->icmp.type, TCA_FLOWER_KEY_ICMPV4_TYPE,
1151			       &mask->icmp.type,
1152			       TCA_FLOWER_KEY_ICMPV4_TYPE_MASK,
1153			       sizeof(key->icmp.type));
1154		fl_set_key_val(tb, &key->icmp.code, TCA_FLOWER_KEY_ICMPV4_CODE,
1155			       &mask->icmp.code,
1156			       TCA_FLOWER_KEY_ICMPV4_CODE_MASK,
1157			       sizeof(key->icmp.code));
1158	} else if (key->basic.n_proto == htons(ETH_P_IPV6) &&
1159		   key->basic.ip_proto == IPPROTO_ICMPV6) {
1160		fl_set_key_val(tb, &key->icmp.type, TCA_FLOWER_KEY_ICMPV6_TYPE,
1161			       &mask->icmp.type,
1162			       TCA_FLOWER_KEY_ICMPV6_TYPE_MASK,
1163			       sizeof(key->icmp.type));
1164		fl_set_key_val(tb, &key->icmp.code, TCA_FLOWER_KEY_ICMPV6_CODE,
1165			       &mask->icmp.code,
1166			       TCA_FLOWER_KEY_ICMPV6_CODE_MASK,
1167			       sizeof(key->icmp.code));
1168	} else if (key->basic.n_proto == htons(ETH_P_MPLS_UC) ||
1169		   key->basic.n_proto == htons(ETH_P_MPLS_MC)) {
1170		ret = fl_set_key_mpls(tb, &key->mpls, &mask->mpls);
1171		if (ret)
1172			return ret;
1173	} else if (key->basic.n_proto == htons(ETH_P_ARP) ||
1174		   key->basic.n_proto == htons(ETH_P_RARP)) {
1175		fl_set_key_val(tb, &key->arp.sip, TCA_FLOWER_KEY_ARP_SIP,
1176			       &mask->arp.sip, TCA_FLOWER_KEY_ARP_SIP_MASK,
1177			       sizeof(key->arp.sip));
1178		fl_set_key_val(tb, &key->arp.tip, TCA_FLOWER_KEY_ARP_TIP,
1179			       &mask->arp.tip, TCA_FLOWER_KEY_ARP_TIP_MASK,
1180			       sizeof(key->arp.tip));
1181		fl_set_key_val(tb, &key->arp.op, TCA_FLOWER_KEY_ARP_OP,
1182			       &mask->arp.op, TCA_FLOWER_KEY_ARP_OP_MASK,
1183			       sizeof(key->arp.op));
1184		fl_set_key_val(tb, key->arp.sha, TCA_FLOWER_KEY_ARP_SHA,
1185			       mask->arp.sha, TCA_FLOWER_KEY_ARP_SHA_MASK,
1186			       sizeof(key->arp.sha));
1187		fl_set_key_val(tb, key->arp.tha, TCA_FLOWER_KEY_ARP_THA,
1188			       mask->arp.tha, TCA_FLOWER_KEY_ARP_THA_MASK,
1189			       sizeof(key->arp.tha));
1190	}
1191
1192	if (key->basic.ip_proto == IPPROTO_TCP ||
1193	    key->basic.ip_proto == IPPROTO_UDP ||
1194	    key->basic.ip_proto == IPPROTO_SCTP) {
1195		ret = fl_set_key_port_range(tb, key, mask);
1196		if (ret)
1197			return ret;
1198	}
1199
1200	if (tb[TCA_FLOWER_KEY_ENC_IPV4_SRC] ||
1201	    tb[TCA_FLOWER_KEY_ENC_IPV4_DST]) {
1202		key->enc_control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
1203		mask->enc_control.addr_type = ~0;
1204		fl_set_key_val(tb, &key->enc_ipv4.src,
1205			       TCA_FLOWER_KEY_ENC_IPV4_SRC,
1206			       &mask->enc_ipv4.src,
1207			       TCA_FLOWER_KEY_ENC_IPV4_SRC_MASK,
1208			       sizeof(key->enc_ipv4.src));
1209		fl_set_key_val(tb, &key->enc_ipv4.dst,
1210			       TCA_FLOWER_KEY_ENC_IPV4_DST,
1211			       &mask->enc_ipv4.dst,
1212			       TCA_FLOWER_KEY_ENC_IPV4_DST_MASK,
1213			       sizeof(key->enc_ipv4.dst));
1214	}
1215
1216	if (tb[TCA_FLOWER_KEY_ENC_IPV6_SRC] ||
1217	    tb[TCA_FLOWER_KEY_ENC_IPV6_DST]) {
1218		key->enc_control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
1219		mask->enc_control.addr_type = ~0;
1220		fl_set_key_val(tb, &key->enc_ipv6.src,
1221			       TCA_FLOWER_KEY_ENC_IPV6_SRC,
1222			       &mask->enc_ipv6.src,
1223			       TCA_FLOWER_KEY_ENC_IPV6_SRC_MASK,
1224			       sizeof(key->enc_ipv6.src));
1225		fl_set_key_val(tb, &key->enc_ipv6.dst,
1226			       TCA_FLOWER_KEY_ENC_IPV6_DST,
1227			       &mask->enc_ipv6.dst,
1228			       TCA_FLOWER_KEY_ENC_IPV6_DST_MASK,
1229			       sizeof(key->enc_ipv6.dst));
1230	}
1231
1232	fl_set_key_val(tb, &key->enc_key_id.keyid, TCA_FLOWER_KEY_ENC_KEY_ID,
1233		       &mask->enc_key_id.keyid, TCA_FLOWER_UNSPEC,
1234		       sizeof(key->enc_key_id.keyid));
1235
1236	fl_set_key_val(tb, &key->enc_tp.src, TCA_FLOWER_KEY_ENC_UDP_SRC_PORT,
1237		       &mask->enc_tp.src, TCA_FLOWER_KEY_ENC_UDP_SRC_PORT_MASK,
1238		       sizeof(key->enc_tp.src));
1239
1240	fl_set_key_val(tb, &key->enc_tp.dst, TCA_FLOWER_KEY_ENC_UDP_DST_PORT,
1241		       &mask->enc_tp.dst, TCA_FLOWER_KEY_ENC_UDP_DST_PORT_MASK,
1242		       sizeof(key->enc_tp.dst));
1243
1244	fl_set_key_ip(tb, true, &key->enc_ip, &mask->enc_ip);
1245
1246	if (tb[TCA_FLOWER_KEY_ENC_OPTS]) {
1247		ret = fl_set_enc_opt(tb, key, mask, extack);
1248		if (ret)
1249			return ret;
1250	}
1251
1252	ret = fl_set_key_ct(tb, &key->ct, &mask->ct, extack);
1253	if (ret)
1254		return ret;
1255
1256	if (tb[TCA_FLOWER_KEY_FLAGS])
1257		ret = fl_set_key_flags(tb, &key->control.flags, &mask->control.flags);
1258
1259	return ret;
1260}
1261
1262static void fl_mask_copy(struct fl_flow_mask *dst,
1263			 struct fl_flow_mask *src)
1264{
1265	const void *psrc = fl_key_get_start(&src->key, src);
1266	void *pdst = fl_key_get_start(&dst->key, src);
1267
1268	memcpy(pdst, psrc, fl_mask_range(src));
1269	dst->range = src->range;
1270}
1271
1272static const struct rhashtable_params fl_ht_params = {
1273	.key_offset = offsetof(struct cls_fl_filter, mkey), /* base offset */
1274	.head_offset = offsetof(struct cls_fl_filter, ht_node),
1275	.automatic_shrinking = true,
1276};
1277
1278static int fl_init_mask_hashtable(struct fl_flow_mask *mask)
1279{
1280	mask->filter_ht_params = fl_ht_params;
1281	mask->filter_ht_params.key_len = fl_mask_range(mask);
1282	mask->filter_ht_params.key_offset += mask->range.start;
1283
1284	return rhashtable_init(&mask->ht, &mask->filter_ht_params);
1285}
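/* Each mask gets its own hashtable keyed only on the bytes that the mask
 * actually uses. A sketch with hypothetical values: a mask covering bytes
 * 16..47 of the flow key yields key_len = 32 and key_offset =
 * offsetof(struct cls_fl_filter, mkey) + 16.
 */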
1286
1287#define FL_KEY_MEMBER_OFFSET(member) offsetof(struct fl_flow_key, member)
1288#define FL_KEY_MEMBER_SIZE(member) sizeof_field(struct fl_flow_key, member)
1289
1290#define FL_KEY_IS_MASKED(mask, member)						\
1291	memchr_inv(((char *)mask) + FL_KEY_MEMBER_OFFSET(member),		\
1292		   0, FL_KEY_MEMBER_SIZE(member))				\
1293
1294#define FL_KEY_SET(keys, cnt, id, member)					\
1295	do {									\
1296		keys[cnt].key_id = id;						\
1297		keys[cnt].offset = FL_KEY_MEMBER_OFFSET(member);		\
1298		cnt++;								\
1299	} while(0);
1300
1301#define FL_KEY_SET_IF_MASKED(mask, keys, cnt, id, member)			\
1302	do {									\
1303		if (FL_KEY_IS_MASKED(mask, member))				\
1304			FL_KEY_SET(keys, cnt, id, member);			\
1305	} while(0);
1306
1307static void fl_init_dissector(struct flow_dissector *dissector,
1308			      struct fl_flow_key *mask)
1309{
1310	struct flow_dissector_key keys[FLOW_DISSECTOR_KEY_MAX];
1311	size_t cnt = 0;
1312
1313	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1314			     FLOW_DISSECTOR_KEY_META, meta);
1315	FL_KEY_SET(keys, cnt, FLOW_DISSECTOR_KEY_CONTROL, control);
1316	FL_KEY_SET(keys, cnt, FLOW_DISSECTOR_KEY_BASIC, basic);
1317	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1318			     FLOW_DISSECTOR_KEY_ETH_ADDRS, eth);
1319	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1320			     FLOW_DISSECTOR_KEY_IPV4_ADDRS, ipv4);
1321	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1322			     FLOW_DISSECTOR_KEY_IPV6_ADDRS, ipv6);
1323	if (FL_KEY_IS_MASKED(mask, tp) ||
1324	    FL_KEY_IS_MASKED(mask, tp_range.tp_min) || FL_KEY_IS_MASKED(mask, tp_range.tp_max))
1325		FL_KEY_SET(keys, cnt, FLOW_DISSECTOR_KEY_PORTS, tp);
1326	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1327			     FLOW_DISSECTOR_KEY_IP, ip);
1328	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1329			     FLOW_DISSECTOR_KEY_TCP, tcp);
1330	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1331			     FLOW_DISSECTOR_KEY_ICMP, icmp);
1332	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1333			     FLOW_DISSECTOR_KEY_ARP, arp);
1334	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1335			     FLOW_DISSECTOR_KEY_MPLS, mpls);
1336	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1337			     FLOW_DISSECTOR_KEY_VLAN, vlan);
1338	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1339			     FLOW_DISSECTOR_KEY_CVLAN, cvlan);
1340	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1341			     FLOW_DISSECTOR_KEY_ENC_KEYID, enc_key_id);
1342	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1343			     FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS, enc_ipv4);
1344	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1345			     FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS, enc_ipv6);
1346	if (FL_KEY_IS_MASKED(mask, enc_ipv4) ||
1347	    FL_KEY_IS_MASKED(mask, enc_ipv6))
1348		FL_KEY_SET(keys, cnt, FLOW_DISSECTOR_KEY_ENC_CONTROL,
1349			   enc_control);
1350	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1351			     FLOW_DISSECTOR_KEY_ENC_PORTS, enc_tp);
1352	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1353			     FLOW_DISSECTOR_KEY_ENC_IP, enc_ip);
1354	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1355			     FLOW_DISSECTOR_KEY_ENC_OPTS, enc_opts);
1356	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1357			     FLOW_DISSECTOR_KEY_CT, ct);
1358
1359	skb_flow_dissector_init(dissector, keys, cnt);
1360}
1361
1362static struct fl_flow_mask *fl_create_new_mask(struct cls_fl_head *head,
1363					       struct fl_flow_mask *mask)
1364{
1365	struct fl_flow_mask *newmask;
1366	int err;
1367
1368	newmask = kzalloc(sizeof(*newmask), GFP_KERNEL);
1369	if (!newmask)
1370		return ERR_PTR(-ENOMEM);
1371
1372	fl_mask_copy(newmask, mask);
1373
1374	if ((newmask->key.tp_range.tp_min.dst && newmask->key.tp_range.tp_max.dst) ||
1375	    (newmask->key.tp_range.tp_min.src && newmask->key.tp_range.tp_max.src))
1376		newmask->flags |= TCA_FLOWER_MASK_FLAGS_RANGE;
1377
1378	err = fl_init_mask_hashtable(newmask);
1379	if (err)
1380		goto errout_free;
1381
1382	fl_init_dissector(&newmask->dissector, &newmask->key);
1383
1384	INIT_LIST_HEAD_RCU(&newmask->filters);
1385
1386	refcount_set(&newmask->refcnt, 1);
1387	err = rhashtable_replace_fast(&head->ht, &mask->ht_node,
1388				      &newmask->ht_node, mask_ht_params);
1389	if (err)
1390		goto errout_destroy;
1391
1392	spin_lock(&head->masks_lock);
1393	list_add_tail_rcu(&newmask->list, &head->masks);
1394	spin_unlock(&head->masks_lock);
1395
1396	return newmask;
1397
1398errout_destroy:
1399	rhashtable_destroy(&newmask->ht);
1400errout_free:
1401	kfree(newmask);
1402
1403	return ERR_PTR(err);
1404}
1405
1406static int fl_check_assign_mask(struct cls_fl_head *head,
1407				struct cls_fl_filter *fnew,
1408				struct cls_fl_filter *fold,
1409				struct fl_flow_mask *mask)
1410{
1411	struct fl_flow_mask *newmask;
1412	int ret = 0;
1413
1414	rcu_read_lock();
1415
1416	/* Insert mask as temporary node to prevent concurrent creation of mask
1417	 * with same key. Any concurrent lookups with same key will return
1418	 * -EAGAIN because mask's refcnt is zero.
1419	 */
1420	fnew->mask = rhashtable_lookup_get_insert_fast(&head->ht,
1421						       &mask->ht_node,
1422						       mask_ht_params);
1423	if (!fnew->mask) {
1424		rcu_read_unlock();
1425
1426		if (fold) {
1427			ret = -EINVAL;
1428			goto errout_cleanup;
1429		}
1430
1431		newmask = fl_create_new_mask(head, mask);
1432		if (IS_ERR(newmask)) {
1433			ret = PTR_ERR(newmask);
1434			goto errout_cleanup;
1435		}
1436
1437		fnew->mask = newmask;
1438		return 0;
1439	} else if (IS_ERR(fnew->mask)) {
1440		ret = PTR_ERR(fnew->mask);
1441	} else if (fold && fold->mask != fnew->mask) {
1442		ret = -EINVAL;
1443	} else if (!refcount_inc_not_zero(&fnew->mask->refcnt)) {
1444		/* Mask was deleted concurrently, try again */
1445		ret = -EAGAIN;
1446	}
1447	rcu_read_unlock();
1448	return ret;
1449
1450errout_cleanup:
1451	rhashtable_remove_fast(&head->ht, &mask->ht_node,
1452			       mask_ht_params);
1453	return ret;
1454}
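/* On success, the caller's template mask is disposable either way: on the
 * new-mask path fl_create_new_mask() copied it and replaced the temporary
 * ht node, and on the existing-mask path only the refcount was taken. The
 * template itself is released later via fl_uninit_mask_free_work().
 */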
1455
1456static int fl_set_parms(struct net *net, struct tcf_proto *tp,
1457			struct cls_fl_filter *f, struct fl_flow_mask *mask,
1458			unsigned long base, struct nlattr **tb,
1459			struct nlattr *est, bool ovr,
1460			struct fl_flow_tmplt *tmplt, bool rtnl_held,
1461			struct netlink_ext_ack *extack)
1462{
1463	int err;
1464
1465	err = tcf_exts_validate(net, tp, tb, est, &f->exts, ovr, rtnl_held,
1466				extack);
1467	if (err < 0)
1468		return err;
1469
1470	if (tb[TCA_FLOWER_CLASSID]) {
1471		f->res.classid = nla_get_u32(tb[TCA_FLOWER_CLASSID]);
1472		if (!rtnl_held)
1473			rtnl_lock();
1474		tcf_bind_filter(tp, &f->res, base);
1475		if (!rtnl_held)
1476			rtnl_unlock();
1477	}
1478
1479	err = fl_set_key(net, tb, &f->key, &mask->key, extack);
1480	if (err)
1481		return err;
1482
1483	fl_mask_update_range(mask);
1484	fl_set_masked_key(&f->mkey, &f->key, mask);
1485
1486	if (!fl_mask_fits_tmplt(tmplt, mask)) {
1487		NL_SET_ERR_MSG_MOD(extack, "Mask does not fit the template");
1488		return -EINVAL;
1489	}
1490
1491	return 0;
1492}
1493
1494static int fl_ht_insert_unique(struct cls_fl_filter *fnew,
1495			       struct cls_fl_filter *fold,
1496			       bool *in_ht)
1497{
1498	struct fl_flow_mask *mask = fnew->mask;
1499	int err;
1500
1501	err = rhashtable_lookup_insert_fast(&mask->ht,
1502					    &fnew->ht_node,
1503					    mask->filter_ht_params);
1504	if (err) {
1505		*in_ht = false;
1506		/* It is okay if a filter with the same key exists when
1507		 * overwriting.
1508		 */
1509		return fold && err == -EEXIST ? 0 : err;
1510	}
1511
1512	*in_ht = true;
1513	return 0;
1514}
1515
1516static int fl_change(struct net *net, struct sk_buff *in_skb,
1517		     struct tcf_proto *tp, unsigned long base,
1518		     u32 handle, struct nlattr **tca,
1519		     void **arg, bool ovr, bool rtnl_held,
1520		     struct netlink_ext_ack *extack)
1521{
1522	struct cls_fl_head *head = fl_head_dereference(tp);
1523	struct cls_fl_filter *fold = *arg;
1524	struct cls_fl_filter *fnew;
1525	struct fl_flow_mask *mask;
1526	struct nlattr **tb;
1527	bool in_ht;
1528	int err;
1529
1530	if (!tca[TCA_OPTIONS]) {
1531		err = -EINVAL;
1532		goto errout_fold;
1533	}
1534
1535	mask = kzalloc(sizeof(struct fl_flow_mask), GFP_KERNEL);
1536	if (!mask) {
1537		err = -ENOBUFS;
1538		goto errout_fold;
1539	}
1540
1541	tb = kcalloc(TCA_FLOWER_MAX + 1, sizeof(struct nlattr *), GFP_KERNEL);
1542	if (!tb) {
1543		err = -ENOBUFS;
1544		goto errout_mask_alloc;
1545	}
1546
1547	err = nla_parse_nested_deprecated(tb, TCA_FLOWER_MAX,
1548					  tca[TCA_OPTIONS], fl_policy, NULL);
1549	if (err < 0)
1550		goto errout_tb;
1551
1552	if (fold && handle && fold->handle != handle) {
1553		err = -EINVAL;
1554		goto errout_tb;
1555	}
1556
1557	fnew = kzalloc(sizeof(*fnew), GFP_KERNEL);
1558	if (!fnew) {
1559		err = -ENOBUFS;
1560		goto errout_tb;
1561	}
1562	INIT_LIST_HEAD(&fnew->hw_list);
1563	refcount_set(&fnew->refcnt, 1);
1564
1565	err = tcf_exts_init(&fnew->exts, net, TCA_FLOWER_ACT, 0);
1566	if (err < 0)
1567		goto errout;
1568
1569	if (tb[TCA_FLOWER_FLAGS]) {
1570		fnew->flags = nla_get_u32(tb[TCA_FLOWER_FLAGS]);
1571
1572		if (!tc_flags_valid(fnew->flags)) {
1573			err = -EINVAL;
1574			goto errout;
1575		}
1576	}
1577
1578	err = fl_set_parms(net, tp, fnew, mask, base, tb, tca[TCA_RATE], ovr,
1579			   tp->chain->tmplt_priv, rtnl_held, extack);
1580	if (err)
1581		goto errout;
1582
1583	err = fl_check_assign_mask(head, fnew, fold, mask);
1584	if (err)
1585		goto errout;
1586
1587	err = fl_ht_insert_unique(fnew, fold, &in_ht);
1588	if (err)
1589		goto errout_mask;
1590
1591	if (!tc_skip_hw(fnew->flags)) {
1592		err = fl_hw_replace_filter(tp, fnew, rtnl_held, extack);
1593		if (err)
1594			goto errout_ht;
1595	}
1596
1597	if (!tc_in_hw(fnew->flags))
1598		fnew->flags |= TCA_CLS_FLAGS_NOT_IN_HW;
1599
1600	spin_lock(&tp->lock);
1601
1602	/* tp was deleted concurrently. -EAGAIN will cause caller to lookup
1603	 * proto again or create new one, if necessary.
1604	 */
1605	if (tp->deleting) {
1606		err = -EAGAIN;
1607		goto errout_hw;
1608	}
1609
1610	if (fold) {
1611		/* Fold filter was deleted concurrently. Retry lookup. */
1612		if (fold->deleted) {
1613			err = -EAGAIN;
1614			goto errout_hw;
1615		}
1616
1617		fnew->handle = handle;
1618
1619		if (!in_ht) {
1620			struct rhashtable_params params =
1621				fnew->mask->filter_ht_params;
1622
1623			err = rhashtable_insert_fast(&fnew->mask->ht,
1624						     &fnew->ht_node,
1625						     params);
1626			if (err)
1627				goto errout_hw;
1628			in_ht = true;
1629		}
1630
1631		refcount_inc(&fnew->refcnt);
1632		rhashtable_remove_fast(&fold->mask->ht,
1633				       &fold->ht_node,
1634				       fold->mask->filter_ht_params);
1635		idr_replace(&head->handle_idr, fnew, fnew->handle);
1636		list_replace_rcu(&fold->list, &fnew->list);
1637		fold->deleted = true;
1638
1639		spin_unlock(&tp->lock);
1640
1641		fl_mask_put(head, fold->mask);
1642		if (!tc_skip_hw(fold->flags))
1643			fl_hw_destroy_filter(tp, fold, rtnl_held, NULL);
1644		tcf_unbind_filter(tp, &fold->res);
1645		/* Caller holds reference to fold, so refcnt is always > 0
1646		 * after this.
1647		 */
1648		refcount_dec(&fold->refcnt);
1649		__fl_put(fold);
1650	} else {
1651		if (handle) {
1652			/* The user specified a handle that does not exist yet. */
1653			err = idr_alloc_u32(&head->handle_idr, fnew, &handle,
1654					    handle, GFP_ATOMIC);
1655
1656			/* Filter with specified handle was concurrently
1657			 * inserted after initial check in cls_api. This is not
1658			 * necessarily an error if NLM_F_EXCL is not set in
1659			 * message flags. Returning EAGAIN will cause cls_api to
1660			 * try to update concurrently inserted rule.
1661			 */
1662			if (err == -ENOSPC)
1663				err = -EAGAIN;
1664		} else {
1665			handle = 1;
1666			err = idr_alloc_u32(&head->handle_idr, fnew, &handle,
1667					    INT_MAX, GFP_ATOMIC);
1668		}
1669		if (err)
1670			goto errout_hw;
1671
1672		refcount_inc(&fnew->refcnt);
1673		fnew->handle = handle;
1674		list_add_tail_rcu(&fnew->list, &fnew->mask->filters);
1675		spin_unlock(&tp->lock);
1676	}
1677
1678	*arg = fnew;
1679
1680	kfree(tb);
1681	tcf_queue_work(&mask->rwork, fl_uninit_mask_free_work);
1682	return 0;
1683
1684errout_ht:
1685	spin_lock(&tp->lock);
1686errout_hw:
1687	fnew->deleted = true;
1688	spin_unlock(&tp->lock);
1689	if (!tc_skip_hw(fnew->flags))
1690		fl_hw_destroy_filter(tp, fnew, rtnl_held, NULL);
1691	if (in_ht)
1692		rhashtable_remove_fast(&fnew->mask->ht, &fnew->ht_node,
1693				       fnew->mask->filter_ht_params);
1694errout_mask:
1695	fl_mask_put(head, fnew->mask);
1696errout:
1697	__fl_put(fnew);
1698errout_tb:
1699	kfree(tb);
1700errout_mask_alloc:
1701	tcf_queue_work(&mask->rwork, fl_uninit_mask_free_work);
1702errout_fold:
1703	if (fold)
1704		__fl_put(fold);
1705	return err;
1706}
1707
1708static int fl_delete(struct tcf_proto *tp, void *arg, bool *last,
1709		     bool rtnl_held, struct netlink_ext_ack *extack)
1710{
1711	struct cls_fl_head *head = fl_head_dereference(tp);
1712	struct cls_fl_filter *f = arg;
1713	bool last_on_mask;
1714	int err = 0;
1715
1716	err = __fl_delete(tp, f, &last_on_mask, rtnl_held, extack);
1717	*last = list_empty(&head->masks);
1718	__fl_put(f);
1719
1720	return err;
1721}
1722
1723static void fl_walk(struct tcf_proto *tp, struct tcf_walker *arg,
1724		    bool rtnl_held)
1725{
1726	struct cls_fl_head *head = fl_head_dereference(tp);
1727	unsigned long id = arg->cookie, tmp;
1728	struct cls_fl_filter *f;
1729
1730	arg->count = arg->skip;
1731
1732	idr_for_each_entry_continue_ul(&head->handle_idr, f, tmp, id) {
1733		/* don't return filters that are being deleted */
1734		if (!refcount_inc_not_zero(&f->refcnt))
1735			continue;
1736		if (arg->fn(tp, f, arg) < 0) {
1737			__fl_put(f);
1738			arg->stop = 1;
1739			break;
1740		}
1741		__fl_put(f);
1742		arg->count++;
1743	}
1744	arg->cookie = id;
1745}
1746
1747static struct cls_fl_filter *
1748fl_get_next_hw_filter(struct tcf_proto *tp, struct cls_fl_filter *f, bool add)
1749{
1750	struct cls_fl_head *head = fl_head_dereference(tp);
1751
1752	spin_lock(&tp->lock);
1753	if (list_empty(&head->hw_filters)) {
1754		spin_unlock(&tp->lock);
1755		return NULL;
1756	}
1757
1758	if (!f)
1759		f = list_entry(&head->hw_filters, struct cls_fl_filter,
1760			       hw_list);
1761	list_for_each_entry_continue(f, &head->hw_filters, hw_list) {
1762		if (!(add && f->deleted) && refcount_inc_not_zero(&f->refcnt)) {
1763			spin_unlock(&tp->lock);
1764			return f;
1765		}
1766	}
1767
1768	spin_unlock(&tp->lock);
1769	return NULL;
1770}
1771
1772static int fl_reoffload(struct tcf_proto *tp, bool add, flow_setup_cb_t *cb,
1773			void *cb_priv, struct netlink_ext_ack *extack)
1774{
1775	struct tcf_block *block = tp->chain->block;
1776	struct flow_cls_offload cls_flower = {};
1777	struct cls_fl_filter *f = NULL;
1778	int err;
1779
1780	/* hw_filters list can only be changed by hw offload functions after
1781	 * obtaining rtnl lock. Make sure it is not changed while reoffload is
1782	 * iterating it.
1783	 */
1784	ASSERT_RTNL();
1785
1786	while ((f = fl_get_next_hw_filter(tp, f, add))) {
1787		cls_flower.rule =
1788			flow_rule_alloc(tcf_exts_num_actions(&f->exts));
1789		if (!cls_flower.rule) {
1790			__fl_put(f);
1791			return -ENOMEM;
1792		}
1793
1794		tc_cls_common_offload_init(&cls_flower.common, tp, f->flags,
1795					   extack);
1796		cls_flower.command = add ?
1797			FLOW_CLS_REPLACE : FLOW_CLS_DESTROY;
1798		cls_flower.cookie = (unsigned long)f;
1799		cls_flower.rule->match.dissector = &f->mask->dissector;
1800		cls_flower.rule->match.mask = &f->mask->key;
1801		cls_flower.rule->match.key = &f->mkey;
1802
1803		err = tc_setup_flow_action(&cls_flower.rule->action, &f->exts,
1804					   true);
1805		if (err) {
1806			kfree(cls_flower.rule);
1807			if (tc_skip_sw(f->flags)) {
1808				NL_SET_ERR_MSG_MOD(extack, "Failed to setup flow action");
1809				__fl_put(f);
1810				return err;
1811			}
1812			goto next_flow;
1813		}
1814
1815		cls_flower.classid = f->res.classid;
1816
1817		err = tc_setup_cb_reoffload(block, tp, add, cb,
1818					    TC_SETUP_CLSFLOWER, &cls_flower,
1819					    cb_priv, &f->flags,
1820					    &f->in_hw_count);
1821		tc_cleanup_flow_action(&cls_flower.rule->action);
1822		kfree(cls_flower.rule);
1823
1824		if (err) {
1825			__fl_put(f);
1826			return err;
1827		}
1828next_flow:
1829		__fl_put(f);
1830	}
1831
1832	return 0;
1833}
1834
1835static void fl_hw_add(struct tcf_proto *tp, void *type_data)
1836{
1837	struct flow_cls_offload *cls_flower = type_data;
1838	struct cls_fl_filter *f =
1839		(struct cls_fl_filter *) cls_flower->cookie;
1840	struct cls_fl_head *head = fl_head_dereference(tp);
1841
1842	spin_lock(&tp->lock);
1843	list_add(&f->hw_list, &head->hw_filters);
1844	spin_unlock(&tp->lock);
1845}
1846
1847static void fl_hw_del(struct tcf_proto *tp, void *type_data)
1848{
1849	struct flow_cls_offload *cls_flower = type_data;
1850	struct cls_fl_filter *f =
1851		(struct cls_fl_filter *) cls_flower->cookie;
1852
1853	spin_lock(&tp->lock);
1854	if (!list_empty(&f->hw_list))
1855		list_del_init(&f->hw_list);
1856	spin_unlock(&tp->lock);
1857}
1858
1859static int fl_hw_create_tmplt(struct tcf_chain *chain,
1860			      struct fl_flow_tmplt *tmplt)
1861{
1862	struct flow_cls_offload cls_flower = {};
1863	struct tcf_block *block = chain->block;
1864
1865	cls_flower.rule = flow_rule_alloc(0);
1866	if (!cls_flower.rule)
1867		return -ENOMEM;
1868
1869	cls_flower.common.chain_index = chain->index;
1870	cls_flower.command = FLOW_CLS_TMPLT_CREATE;
1871	cls_flower.cookie = (unsigned long) tmplt;
1872	cls_flower.rule->match.dissector = &tmplt->dissector;
1873	cls_flower.rule->match.mask = &tmplt->mask;
1874	cls_flower.rule->match.key = &tmplt->dummy_key;
1875
1876	/* We don't care if a driver (any of them) fails to handle this
1877	 * call. It serves merely as a hint to the drivers.
1878	 */
1879	tc_setup_cb_call(block, TC_SETUP_CLSFLOWER, &cls_flower, false, true);
1880	kfree(cls_flower.rule);
1881
1882	return 0;
1883}
1884
1885static void fl_hw_destroy_tmplt(struct tcf_chain *chain,
1886				struct fl_flow_tmplt *tmplt)
1887{
1888	struct flow_cls_offload cls_flower = {};
1889	struct tcf_block *block = chain->block;
1890
1891	cls_flower.common.chain_index = chain->index;
1892	cls_flower.command = FLOW_CLS_TMPLT_DESTROY;
1893	cls_flower.cookie = (unsigned long) tmplt;
1894
1895	tc_setup_cb_call(block, TC_SETUP_CLSFLOWER, &cls_flower, false, true);
1896}
1897
1898static void *fl_tmplt_create(struct net *net, struct tcf_chain *chain,
1899			     struct nlattr **tca,
1900			     struct netlink_ext_ack *extack)
1901{
1902	struct fl_flow_tmplt *tmplt;
1903	struct nlattr **tb;
1904	int err;
1905
1906	if (!tca[TCA_OPTIONS])
1907		return ERR_PTR(-EINVAL);
1908
1909	tb = kcalloc(TCA_FLOWER_MAX + 1, sizeof(struct nlattr *), GFP_KERNEL);
1910	if (!tb)
1911		return ERR_PTR(-ENOBUFS);
1912	err = nla_parse_nested_deprecated(tb, TCA_FLOWER_MAX,
1913					  tca[TCA_OPTIONS], fl_policy, NULL);
1914	if (err)
1915		goto errout_tb;
1916
1917	tmplt = kzalloc(sizeof(*tmplt), GFP_KERNEL);
1918	if (!tmplt) {
1919		err = -ENOMEM;
1920		goto errout_tb;
1921	}
1922	tmplt->chain = chain;
1923	err = fl_set_key(net, tb, &tmplt->dummy_key, &tmplt->mask, extack);
1924	if (err)
1925		goto errout_tmplt;
1926
1927	fl_init_dissector(&tmplt->dissector, &tmplt->mask);
1928
1929	err = fl_hw_create_tmplt(chain, tmplt);
1930	if (err)
1931		goto errout_tmplt;
1932
1933	kfree(tb);
1934	return tmplt;
1935
1936errout_tmplt:
1937	kfree(tmplt);
1938errout_tb:
1939	kfree(tb);
1940	return ERR_PTR(err);
1941}
1942
1943static void fl_tmplt_destroy(void *tmplt_priv)
1944{
1945	struct fl_flow_tmplt *tmplt = tmplt_priv;
1946
1947	fl_hw_destroy_tmplt(tmplt->chain, tmplt);
1948	kfree(tmplt);
1949}
1950
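/* Illustrative userspace counterpart (device name and addresses are made
 * up): a template is created by adding a chain with a flower match spec
 * but no filter, e.g.:
 *
 *	tc chain add dev eth0 ingress protocol ip \
 *		flower dst_ip 192.168.0.0/16 ip_proto tcp
 *
 * which lands in fl_tmplt_create() above; deleting the chain ends up in
 * fl_tmplt_destroy().
 */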
1951static int fl_dump_key_val(struct sk_buff *skb,
1952			   void *val, int val_type,
1953			   void *mask, int mask_type, int len)
1954{
1955	int err;
1956
1957	if (!memchr_inv(mask, 0, len))
1958		return 0;
1959	err = nla_put(skb, val_type, len, val);
1960	if (err)
1961		return err;
1962	if (mask_type != TCA_FLOWER_UNSPEC) {
1963		err = nla_put(skb, mask_type, len, mask);
1964		if (err)
1965			return err;
1966	}
1967	return 0;
1968}
1969
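/* Note the dump convention used by fl_dump_key_val() above and throughout
 * the rest of this file: an attribute is emitted only if its mask is not
 * all-zero, and passing TCA_FLOWER_UNSPEC as mask_type suppresses the
 * mask attribute for keys that can only be matched exactly.
 */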
1970static int fl_dump_key_port_range(struct sk_buff *skb, struct fl_flow_key *key,
1971				  struct fl_flow_key *mask)
1972{
1973	if (fl_dump_key_val(skb, &key->tp_min.dst, TCA_FLOWER_KEY_PORT_DST_MIN,
1974			    &mask->tp_min.dst, TCA_FLOWER_UNSPEC,
1975			    sizeof(key->tp_min.dst)) ||
1976	    fl_dump_key_val(skb, &key->tp_max.dst, TCA_FLOWER_KEY_PORT_DST_MAX,
1977			    &mask->tp_max.dst, TCA_FLOWER_UNSPEC,
1978			    sizeof(key->tp_max.dst)) ||
1979	    fl_dump_key_val(skb, &key->tp_min.src, TCA_FLOWER_KEY_PORT_SRC_MIN,
1980			    &mask->tp_min.src, TCA_FLOWER_UNSPEC,
1981			    sizeof(key->tp_min.src)) ||
1982	    fl_dump_key_val(skb, &key->tp_max.src, TCA_FLOWER_KEY_PORT_SRC_MAX,
1983			    &mask->tp_max.src, TCA_FLOWER_UNSPEC,
1984			    sizeof(key->tp_max.src)))
1985		return -1;
1986
1987	return 0;
1988}
1989
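/* Illustrative userspace counterpart (device name is made up): port
 * ranges are requested with iproute2's min-max syntax, e.g.:
 *
 *	tc filter add dev eth0 ingress protocol ip \
 *		flower ip_proto tcp dst_port 100-200 action drop
 *
 * which populates the PORT_DST_MIN/PORT_DST_MAX attributes dumped above.
 */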
1990static int fl_dump_key_mpls(struct sk_buff *skb,
1991			    struct flow_dissector_key_mpls *mpls_key,
1992			    struct flow_dissector_key_mpls *mpls_mask)
1993{
1994	int err;
1995
1996	if (!memchr_inv(mpls_mask, 0, sizeof(*mpls_mask)))
1997		return 0;
1998	if (mpls_mask->mpls_ttl) {
1999		err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_TTL,
2000				 mpls_key->mpls_ttl);
2001		if (err)
2002			return err;
2003	}
2004	if (mpls_mask->mpls_tc) {
2005		err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_TC,
2006				 mpls_key->mpls_tc);
2007		if (err)
2008			return err;
2009	}
2010	if (mpls_mask->mpls_label) {
2011		err = nla_put_u32(skb, TCA_FLOWER_KEY_MPLS_LABEL,
2012				  mpls_key->mpls_label);
2013		if (err)
2014			return err;
2015	}
2016	if (mpls_mask->mpls_bos) {
2017		err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_BOS,
2018				 mpls_key->mpls_bos);
2019		if (err)
2020			return err;
2021	}
2022	return 0;
2023}
2024
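/* Illustrative userspace counterpart (device name and values are made
 * up); note that flower MPLS matching requires an MPLS ethertype:
 *
 *	tc filter add dev eth0 ingress protocol mpls_uc \
 *		flower mpls_label 100 mpls_ttl 64 action drop
 */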
2025static int fl_dump_key_ip(struct sk_buff *skb, bool encap,
2026			  struct flow_dissector_key_ip *key,
2027			  struct flow_dissector_key_ip *mask)
2028{
2029	int tos_key = encap ? TCA_FLOWER_KEY_ENC_IP_TOS : TCA_FLOWER_KEY_IP_TOS;
2030	int ttl_key = encap ? TCA_FLOWER_KEY_ENC_IP_TTL : TCA_FLOWER_KEY_IP_TTL;
2031	int tos_mask = encap ? TCA_FLOWER_KEY_ENC_IP_TOS_MASK : TCA_FLOWER_KEY_IP_TOS_MASK;
2032	int ttl_mask = encap ? TCA_FLOWER_KEY_ENC_IP_TTL_MASK : TCA_FLOWER_KEY_IP_TTL_MASK;
2033
2034	if (fl_dump_key_val(skb, &key->tos, tos_key, &mask->tos, tos_mask, sizeof(key->tos)) ||
2035	    fl_dump_key_val(skb, &key->ttl, ttl_key, &mask->ttl, ttl_mask, sizeof(key->ttl)))
2036		return -1;
2037
2038	return 0;
2039}
2040
2041static int fl_dump_key_vlan(struct sk_buff *skb,
2042			    int vlan_id_key, int vlan_prio_key,
2043			    struct flow_dissector_key_vlan *vlan_key,
2044			    struct flow_dissector_key_vlan *vlan_mask)
2045{
2046	int err;
2047
2048	if (!memchr_inv(vlan_mask, 0, sizeof(*vlan_mask)))
2049		return 0;
2050	if (vlan_mask->vlan_id) {
2051		err = nla_put_u16(skb, vlan_id_key,
2052				  vlan_key->vlan_id);
2053		if (err)
2054			return err;
2055	}
2056	if (vlan_mask->vlan_priority) {
2057		err = nla_put_u8(skb, vlan_prio_key,
2058				 vlan_key->vlan_priority);
2059		if (err)
2060			return err;
2061	}
2062	return 0;
2063}
2064
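/* Illustrative userspace counterpart (device name is made up):
 *
 *	tc filter add dev eth0 ingress protocol 802.1Q \
 *		flower vlan_id 10 vlan_prio 3 action drop
 *
 * For QinQ, protocol 802.1ad plus the cvlan_id/cvlan_prio keys select
 * the inner tag dumped via the CVLAN attributes above.
 */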
2065static void fl_get_key_flag(u32 dissector_key, u32 dissector_mask,
2066			    u32 *flower_key, u32 *flower_mask,
2067			    u32 flower_flag_bit, u32 dissector_flag_bit)
2068{
2069	if (dissector_mask & dissector_flag_bit) {
2070		*flower_mask |= flower_flag_bit;
2071		if (dissector_key & dissector_flag_bit)
2072			*flower_key |= flower_flag_bit;
2073	}
2074}
2075
2076static int fl_dump_key_flags(struct sk_buff *skb, u32 flags_key, u32 flags_mask)
2077{
2078	u32 key, mask;
2079	__be32 _key, _mask;
2080	int err;
2081
2082	if (!memchr_inv(&flags_mask, 0, sizeof(flags_mask)))
2083		return 0;
2084
2085	key = 0;
2086	mask = 0;
2087
2088	fl_get_key_flag(flags_key, flags_mask, &key, &mask,
2089			TCA_FLOWER_KEY_FLAGS_IS_FRAGMENT, FLOW_DIS_IS_FRAGMENT);
2090	fl_get_key_flag(flags_key, flags_mask, &key, &mask,
2091			TCA_FLOWER_KEY_FLAGS_FRAG_IS_FIRST,
2092			FLOW_DIS_FIRST_FRAG);
2093
2094	_key = cpu_to_be32(key);
2095	_mask = cpu_to_be32(mask);
2096
2097	err = nla_put(skb, TCA_FLOWER_KEY_FLAGS, 4, &_key);
2098	if (err)
2099		return err;
2100
2101	return nla_put(skb, TCA_FLOWER_KEY_FLAGS_MASK, 4, &_mask);
2102}
2103
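/* fl_dump_key_flags() above emits the key and mask as a pair of
 * big-endian u32 attributes; only the fragmentation bits are currently
 * translated. Illustrative userspace counterpart (device name is made
 * up):
 *
 *	tc filter add dev eth0 ingress protocol ip \
 *		flower ip_flags frag action drop
 */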
2104static int fl_dump_key_geneve_opt(struct sk_buff *skb,
2105				  struct flow_dissector_key_enc_opts *enc_opts)
2106{
2107	struct geneve_opt *opt;
2108	struct nlattr *nest;
2109	int opt_off = 0;
2110
2111	nest = nla_nest_start_noflag(skb, TCA_FLOWER_KEY_ENC_OPTS_GENEVE);
2112	if (!nest)
2113		goto nla_put_failure;
2114
2115	while (enc_opts->len > opt_off) {
2116		opt = (struct geneve_opt *)&enc_opts->data[opt_off];
2117
2118		if (nla_put_be16(skb, TCA_FLOWER_KEY_ENC_OPT_GENEVE_CLASS,
2119				 opt->opt_class))
2120			goto nla_put_failure;
2121		if (nla_put_u8(skb, TCA_FLOWER_KEY_ENC_OPT_GENEVE_TYPE,
2122			       opt->type))
2123			goto nla_put_failure;
2124		if (nla_put(skb, TCA_FLOWER_KEY_ENC_OPT_GENEVE_DATA,
2125			    opt->length * 4, opt->opt_data))
2126			goto nla_put_failure;
2127
2128		opt_off += sizeof(struct geneve_opt) + opt->length * 4;
2129	}
2130	nla_nest_end(skb, nest);
2131	return 0;
2132
2133nla_put_failure:
2134	nla_nest_cancel(skb, nest);
2135	return -EMSGSIZE;
2136}
2137
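/* Illustrative userspace counterpart (device name and option values are
 * made up): iproute2 encodes GENEVE options as CLASS:TYPE:DATA in hex,
 * e.g.:
 *
 *	tc filter add dev geneve0 ingress flower \
 *		enc_key_id 102 geneve_opts 0102:80:00880022 action drop
 */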
2138static int fl_dump_key_ct(struct sk_buff *skb,
2139			  struct flow_dissector_key_ct *key,
2140			  struct flow_dissector_key_ct *mask)
2141{
2142	if (IS_ENABLED(CONFIG_NF_CONNTRACK) &&
2143	    fl_dump_key_val(skb, &key->ct_state, TCA_FLOWER_KEY_CT_STATE,
2144			    &mask->ct_state, TCA_FLOWER_KEY_CT_STATE_MASK,
2145			    sizeof(key->ct_state)))
2146		goto nla_put_failure;
2147
2148	if (IS_ENABLED(CONFIG_NF_CONNTRACK_ZONES) &&
2149	    fl_dump_key_val(skb, &key->ct_zone, TCA_FLOWER_KEY_CT_ZONE,
2150			    &mask->ct_zone, TCA_FLOWER_KEY_CT_ZONE_MASK,
2151			    sizeof(key->ct_zone)))
2152		goto nla_put_failure;
2153
2154	if (IS_ENABLED(CONFIG_NF_CONNTRACK_MARK) &&
2155	    fl_dump_key_val(skb, &key->ct_mark, TCA_FLOWER_KEY_CT_MARK,
2156			    &mask->ct_mark, TCA_FLOWER_KEY_CT_MARK_MASK,
2157			    sizeof(key->ct_mark)))
2158		goto nla_put_failure;
2159
2160	if (IS_ENABLED(CONFIG_NF_CONNTRACK_LABELS) &&
2161	    fl_dump_key_val(skb, &key->ct_labels, TCA_FLOWER_KEY_CT_LABELS,
2162			    &mask->ct_labels, TCA_FLOWER_KEY_CT_LABELS_MASK,
2163			    sizeof(key->ct_labels)))
2164		goto nla_put_failure;
2165
2166	return 0;
2167
2168nla_put_failure:
2169	return -EMSGSIZE;
2170}
2171
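/* Illustrative userspace counterpart (device name is made up): the
 * conntrack keys are usually combined with the ct action, e.g. matching
 * established connections:
 *
 *	tc filter add dev eth0 ingress protocol ip \
 *		flower ct_state +trk+est action drop
 */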
2172static int fl_dump_key_options(struct sk_buff *skb, int enc_opt_type,
2173			       struct flow_dissector_key_enc_opts *enc_opts)
2174{
2175	struct nlattr *nest;
2176	int err;
2177
2178	if (!enc_opts->len)
2179		return 0;
2180
2181	nest = nla_nest_start_noflag(skb, enc_opt_type);
2182	if (!nest)
2183		goto nla_put_failure;
2184
2185	switch (enc_opts->dst_opt_type) {
2186	case TUNNEL_GENEVE_OPT:
2187		err = fl_dump_key_geneve_opt(skb, enc_opts);
2188		if (err)
2189			goto nla_put_failure;
2190		break;
2191	default:
2192		goto nla_put_failure;
2193	}
2194	nla_nest_end(skb, nest);
2195	return 0;
2196
2197nla_put_failure:
2198	nla_nest_cancel(skb, nest);
2199	return -EMSGSIZE;
2200}
2201
2202static int fl_dump_key_enc_opt(struct sk_buff *skb,
2203			       struct flow_dissector_key_enc_opts *key_opts,
2204			       struct flow_dissector_key_enc_opts *msk_opts)
2205{
2206	int err;
2207
2208	err = fl_dump_key_options(skb, TCA_FLOWER_KEY_ENC_OPTS, key_opts);
2209	if (err)
2210		return err;
2211
2212	return fl_dump_key_options(skb, TCA_FLOWER_KEY_ENC_OPTS_MASK, msk_opts);
2213}
2214
2215static int fl_dump_key(struct sk_buff *skb, struct net *net,
2216		       struct fl_flow_key *key, struct fl_flow_key *mask)
2217{
2218	if (mask->meta.ingress_ifindex) {
2219		struct net_device *dev;
2220
2221		dev = __dev_get_by_index(net, key->meta.ingress_ifindex);
2222		if (dev && nla_put_string(skb, TCA_FLOWER_INDEV, dev->name))
2223			goto nla_put_failure;
2224	}
2225
2226	if (fl_dump_key_val(skb, key->eth.dst, TCA_FLOWER_KEY_ETH_DST,
2227			    mask->eth.dst, TCA_FLOWER_KEY_ETH_DST_MASK,
2228			    sizeof(key->eth.dst)) ||
2229	    fl_dump_key_val(skb, key->eth.src, TCA_FLOWER_KEY_ETH_SRC,
2230			    mask->eth.src, TCA_FLOWER_KEY_ETH_SRC_MASK,
2231			    sizeof(key->eth.src)) ||
2232	    fl_dump_key_val(skb, &key->basic.n_proto, TCA_FLOWER_KEY_ETH_TYPE,
2233			    &mask->basic.n_proto, TCA_FLOWER_UNSPEC,
2234			    sizeof(key->basic.n_proto)))
2235		goto nla_put_failure;
2236
2237	if (fl_dump_key_mpls(skb, &key->mpls, &mask->mpls))
2238		goto nla_put_failure;
2239
2240	if (fl_dump_key_vlan(skb, TCA_FLOWER_KEY_VLAN_ID,
2241			     TCA_FLOWER_KEY_VLAN_PRIO, &key->vlan, &mask->vlan))
2242		goto nla_put_failure;
2243
2244	if (fl_dump_key_vlan(skb, TCA_FLOWER_KEY_CVLAN_ID,
2245			     TCA_FLOWER_KEY_CVLAN_PRIO,
2246			     &key->cvlan, &mask->cvlan) ||
2247	    (mask->cvlan.vlan_tpid &&
2248	     nla_put_be16(skb, TCA_FLOWER_KEY_VLAN_ETH_TYPE,
2249			  key->cvlan.vlan_tpid)))
2250		goto nla_put_failure;
2251
2252	if (mask->basic.n_proto) {
2253		if (mask->cvlan.vlan_tpid) {
2254			if (nla_put_be16(skb, TCA_FLOWER_KEY_CVLAN_ETH_TYPE,
2255					 key->basic.n_proto))
2256				goto nla_put_failure;
2257		} else if (mask->vlan.vlan_tpid) {
2258			if (nla_put_be16(skb, TCA_FLOWER_KEY_VLAN_ETH_TYPE,
2259					 key->basic.n_proto))
2260				goto nla_put_failure;
2261		}
2262	}
2263
2264	if ((key->basic.n_proto == htons(ETH_P_IP) ||
2265	     key->basic.n_proto == htons(ETH_P_IPV6)) &&
2266	    (fl_dump_key_val(skb, &key->basic.ip_proto, TCA_FLOWER_KEY_IP_PROTO,
2267			    &mask->basic.ip_proto, TCA_FLOWER_UNSPEC,
2268			    sizeof(key->basic.ip_proto)) ||
2269	    fl_dump_key_ip(skb, false, &key->ip, &mask->ip)))
2270		goto nla_put_failure;
2271
2272	if (key->control.addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS &&
2273	    (fl_dump_key_val(skb, &key->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC,
2274			     &mask->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC_MASK,
2275			     sizeof(key->ipv4.src)) ||
2276	     fl_dump_key_val(skb, &key->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST,
2277			     &mask->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST_MASK,
2278			     sizeof(key->ipv4.dst))))
2279		goto nla_put_failure;
2280	else if (key->control.addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS &&
2281		 (fl_dump_key_val(skb, &key->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC,
2282				  &mask->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC_MASK,
2283				  sizeof(key->ipv6.src)) ||
2284		  fl_dump_key_val(skb, &key->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST,
2285				  &mask->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST_MASK,
2286				  sizeof(key->ipv6.dst))))
2287		goto nla_put_failure;
2288
2289	if (key->basic.ip_proto == IPPROTO_TCP &&
2290	    (fl_dump_key_val(skb, &key->tp.src, TCA_FLOWER_KEY_TCP_SRC,
2291			     &mask->tp.src, TCA_FLOWER_KEY_TCP_SRC_MASK,
2292			     sizeof(key->tp.src)) ||
2293	     fl_dump_key_val(skb, &key->tp.dst, TCA_FLOWER_KEY_TCP_DST,
2294			     &mask->tp.dst, TCA_FLOWER_KEY_TCP_DST_MASK,
2295			     sizeof(key->tp.dst)) ||
2296	     fl_dump_key_val(skb, &key->tcp.flags, TCA_FLOWER_KEY_TCP_FLAGS,
2297			     &mask->tcp.flags, TCA_FLOWER_KEY_TCP_FLAGS_MASK,
2298			     sizeof(key->tcp.flags))))
2299		goto nla_put_failure;
2300	else if (key->basic.ip_proto == IPPROTO_UDP &&
2301		 (fl_dump_key_val(skb, &key->tp.src, TCA_FLOWER_KEY_UDP_SRC,
2302				  &mask->tp.src, TCA_FLOWER_KEY_UDP_SRC_MASK,
2303				  sizeof(key->tp.src)) ||
2304		  fl_dump_key_val(skb, &key->tp.dst, TCA_FLOWER_KEY_UDP_DST,
2305				  &mask->tp.dst, TCA_FLOWER_KEY_UDP_DST_MASK,
2306				  sizeof(key->tp.dst))))
2307		goto nla_put_failure;
2308	else if (key->basic.ip_proto == IPPROTO_SCTP &&
2309		 (fl_dump_key_val(skb, &key->tp.src, TCA_FLOWER_KEY_SCTP_SRC,
2310				  &mask->tp.src, TCA_FLOWER_KEY_SCTP_SRC_MASK,
2311				  sizeof(key->tp.src)) ||
2312		  fl_dump_key_val(skb, &key->tp.dst, TCA_FLOWER_KEY_SCTP_DST,
2313				  &mask->tp.dst, TCA_FLOWER_KEY_SCTP_DST_MASK,
2314				  sizeof(key->tp.dst))))
2315		goto nla_put_failure;
2316	else if (key->basic.n_proto == htons(ETH_P_IP) &&
2317		 key->basic.ip_proto == IPPROTO_ICMP &&
2318		 (fl_dump_key_val(skb, &key->icmp.type,
2319				  TCA_FLOWER_KEY_ICMPV4_TYPE, &mask->icmp.type,
2320				  TCA_FLOWER_KEY_ICMPV4_TYPE_MASK,
2321				  sizeof(key->icmp.type)) ||
2322		  fl_dump_key_val(skb, &key->icmp.code,
2323				  TCA_FLOWER_KEY_ICMPV4_CODE, &mask->icmp.code,
2324				  TCA_FLOWER_KEY_ICMPV4_CODE_MASK,
2325				  sizeof(key->icmp.code))))
2326		goto nla_put_failure;
2327	else if (key->basic.n_proto == htons(ETH_P_IPV6) &&
2328		 key->basic.ip_proto == IPPROTO_ICMPV6 &&
2329		 (fl_dump_key_val(skb, &key->icmp.type,
2330				  TCA_FLOWER_KEY_ICMPV6_TYPE, &mask->icmp.type,
2331				  TCA_FLOWER_KEY_ICMPV6_TYPE_MASK,
2332				  sizeof(key->icmp.type)) ||
2333		  fl_dump_key_val(skb, &key->icmp.code,
2334				  TCA_FLOWER_KEY_ICMPV6_CODE, &mask->icmp.code,
2335				  TCA_FLOWER_KEY_ICMPV6_CODE_MASK,
2336				  sizeof(key->icmp.code))))
2337		goto nla_put_failure;
2338	else if ((key->basic.n_proto == htons(ETH_P_ARP) ||
2339		  key->basic.n_proto == htons(ETH_P_RARP)) &&
2340		 (fl_dump_key_val(skb, &key->arp.sip,
2341				  TCA_FLOWER_KEY_ARP_SIP, &mask->arp.sip,
2342				  TCA_FLOWER_KEY_ARP_SIP_MASK,
2343				  sizeof(key->arp.sip)) ||
2344		  fl_dump_key_val(skb, &key->arp.tip,
2345				  TCA_FLOWER_KEY_ARP_TIP, &mask->arp.tip,
2346				  TCA_FLOWER_KEY_ARP_TIP_MASK,
2347				  sizeof(key->arp.tip)) ||
2348		  fl_dump_key_val(skb, &key->arp.op,
2349				  TCA_FLOWER_KEY_ARP_OP, &mask->arp.op,
2350				  TCA_FLOWER_KEY_ARP_OP_MASK,
2351				  sizeof(key->arp.op)) ||
2352		  fl_dump_key_val(skb, key->arp.sha, TCA_FLOWER_KEY_ARP_SHA,
2353				  mask->arp.sha, TCA_FLOWER_KEY_ARP_SHA_MASK,
2354				  sizeof(key->arp.sha)) ||
2355		  fl_dump_key_val(skb, key->arp.tha, TCA_FLOWER_KEY_ARP_THA,
2356				  mask->arp.tha, TCA_FLOWER_KEY_ARP_THA_MASK,
2357				  sizeof(key->arp.tha))))
2358		goto nla_put_failure;
2359
2360	if ((key->basic.ip_proto == IPPROTO_TCP ||
2361	     key->basic.ip_proto == IPPROTO_UDP ||
2362	     key->basic.ip_proto == IPPROTO_SCTP) &&
2363	     fl_dump_key_port_range(skb, key, mask))
2364		goto nla_put_failure;
2365
2366	if (key->enc_control.addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS &&
2367	    (fl_dump_key_val(skb, &key->enc_ipv4.src,
2368			    TCA_FLOWER_KEY_ENC_IPV4_SRC, &mask->enc_ipv4.src,
2369			    TCA_FLOWER_KEY_ENC_IPV4_SRC_MASK,
2370			    sizeof(key->enc_ipv4.src)) ||
2371	     fl_dump_key_val(skb, &key->enc_ipv4.dst,
2372			     TCA_FLOWER_KEY_ENC_IPV4_DST, &mask->enc_ipv4.dst,
2373			     TCA_FLOWER_KEY_ENC_IPV4_DST_MASK,
2374			     sizeof(key->enc_ipv4.dst))))
2375		goto nla_put_failure;
2376	else if (key->enc_control.addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS &&
2377		 (fl_dump_key_val(skb, &key->enc_ipv6.src,
2378			    TCA_FLOWER_KEY_ENC_IPV6_SRC, &mask->enc_ipv6.src,
2379			    TCA_FLOWER_KEY_ENC_IPV6_SRC_MASK,
2380			    sizeof(key->enc_ipv6.src)) ||
2381		 fl_dump_key_val(skb, &key->enc_ipv6.dst,
2382				 TCA_FLOWER_KEY_ENC_IPV6_DST,
2383				 &mask->enc_ipv6.dst,
2384				 TCA_FLOWER_KEY_ENC_IPV6_DST_MASK,
2385			    sizeof(key->enc_ipv6.dst))))
2386		goto nla_put_failure;
2387
2388	if (fl_dump_key_val(skb, &key->enc_key_id, TCA_FLOWER_KEY_ENC_KEY_ID,
2389			    &mask->enc_key_id, TCA_FLOWER_UNSPEC,
2390			    sizeof(key->enc_key_id)) ||
2391	    fl_dump_key_val(skb, &key->enc_tp.src,
2392			    TCA_FLOWER_KEY_ENC_UDP_SRC_PORT,
2393			    &mask->enc_tp.src,
2394			    TCA_FLOWER_KEY_ENC_UDP_SRC_PORT_MASK,
2395			    sizeof(key->enc_tp.src)) ||
2396	    fl_dump_key_val(skb, &key->enc_tp.dst,
2397			    TCA_FLOWER_KEY_ENC_UDP_DST_PORT,
2398			    &mask->enc_tp.dst,
2399			    TCA_FLOWER_KEY_ENC_UDP_DST_PORT_MASK,
2400			    sizeof(key->enc_tp.dst)) ||
2401	    fl_dump_key_ip(skb, true, &key->enc_ip, &mask->enc_ip) ||
2402	    fl_dump_key_enc_opt(skb, &key->enc_opts, &mask->enc_opts))
2403		goto nla_put_failure;
2404
2405	if (fl_dump_key_ct(skb, &key->ct, &mask->ct))
2406		goto nla_put_failure;
2407
2408	if (fl_dump_key_flags(skb, key->control.flags, mask->control.flags))
2409		goto nla_put_failure;
2410
2411	return 0;
2412
2413nla_put_failure:
2414	return -EMSGSIZE;
2415}
2416
2417static int fl_dump(struct net *net, struct tcf_proto *tp, void *fh,
2418		   struct sk_buff *skb, struct tcmsg *t, bool rtnl_held)
2419{
2420	struct cls_fl_filter *f = fh;
2421	struct nlattr *nest;
2422	struct fl_flow_key *key, *mask;
2423	bool skip_hw;
2424
2425	if (!f)
2426		return skb->len;
2427
2428	t->tcm_handle = f->handle;
2429
2430	nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
2431	if (!nest)
2432		goto nla_put_failure;
2433
2434	spin_lock(&tp->lock);
2435
2436	if (f->res.classid &&
2437	    nla_put_u32(skb, TCA_FLOWER_CLASSID, f->res.classid))
2438		goto nla_put_failure_locked;
2439
2440	key = &f->key;
2441	mask = &f->mask->key;
2442	skip_hw = tc_skip_hw(f->flags);
2443
2444	if (fl_dump_key(skb, net, key, mask))
2445		goto nla_put_failure_locked;
2446
2447	if (f->flags && nla_put_u32(skb, TCA_FLOWER_FLAGS, f->flags))
2448		goto nla_put_failure_locked;
2449
2450	spin_unlock(&tp->lock);
2451
2452	if (!skip_hw)
2453		fl_hw_update_stats(tp, f, rtnl_held);
2454
2455	if (nla_put_u32(skb, TCA_FLOWER_IN_HW_COUNT, f->in_hw_count))
2456		goto nla_put_failure;
2457
2458	if (tcf_exts_dump(skb, &f->exts))
2459		goto nla_put_failure;
2460
2461	nla_nest_end(skb, nest);
2462
2463	if (tcf_exts_dump_stats(skb, &f->exts) < 0)
2464		goto nla_put_failure;
2465
2466	return skb->len;
2467
2468nla_put_failure_locked:
2469	spin_unlock(&tp->lock);
2470nla_put_failure:
2471	nla_nest_cancel(skb, nest);
2472	return -1;
2473}
2474
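/* A note on the locking in fl_dump() above: res.classid and f->flags are
 * read under tp->lock because flower runs unlocked
 * (TCF_PROTO_OPS_DOIT_UNLOCKED) and a concurrent fl_change() may update
 * them; the hardware stats refresh happens after the lock is dropped.
 */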
2475static int fl_tmplt_dump(struct sk_buff *skb, struct net *net, void *tmplt_priv)
2476{
2477	struct fl_flow_tmplt *tmplt = tmplt_priv;
2478	struct fl_flow_key *key, *mask;
2479	struct nlattr *nest;
2480
2481	nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
2482	if (!nest)
2483		goto nla_put_failure;
2484
2485	key = &tmplt->dummy_key;
2486	mask = &tmplt->mask;
2487
2488	if (fl_dump_key(skb, net, key, mask))
2489		goto nla_put_failure;
2490
2491	nla_nest_end(skb, nest);
2492
2493	return skb->len;
2494
2495nla_put_failure:
2496	nla_nest_cancel(skb, nest);
2497	return -EMSGSIZE;
2498}
2499
2500static void fl_bind_class(void *fh, u32 classid, unsigned long cl)
2501{
2502	struct cls_fl_filter *f = fh;
2503
2504	if (f && f->res.classid == classid)
2505		f->res.class = cl;
2506}
2507
2508static struct tcf_proto_ops cls_fl_ops __read_mostly = {
2509	.kind		= "flower",
2510	.classify	= fl_classify,
2511	.init		= fl_init,
2512	.destroy	= fl_destroy,
2513	.get		= fl_get,
2514	.put		= fl_put,
2515	.change		= fl_change,
2516	.delete		= fl_delete,
2517	.walk		= fl_walk,
2518	.reoffload	= fl_reoffload,
2519	.hw_add		= fl_hw_add,
2520	.hw_del		= fl_hw_del,
2521	.dump		= fl_dump,
2522	.bind_class	= fl_bind_class,
2523	.tmplt_create	= fl_tmplt_create,
2524	.tmplt_destroy	= fl_tmplt_destroy,
2525	.tmplt_dump	= fl_tmplt_dump,
2526	.owner		= THIS_MODULE,
2527	.flags		= TCF_PROTO_OPS_DOIT_UNLOCKED,
2528};
2529
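/* With TCF_PROTO_OPS_DOIT_UNLOCKED set, the TC core may call into
 * cls_fl_ops above without taking the rtnl lock, which is why this file
 * relies on tp->lock, RCU and reference counting for synchronization.
 */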
2530static int __init cls_fl_init(void)
2531{
2532	return register_tcf_proto_ops(&cls_fl_ops);
2533}
2534
2535static void __exit cls_fl_exit(void)
2536{
2537	unregister_tcf_proto_ops(&cls_fl_ops);
2538}
2539
2540module_init(cls_fl_init);
2541module_exit(cls_fl_exit);
2542
2543MODULE_AUTHOR("Jiri Pirko <jiri@resnulli.us>");
2544MODULE_DESCRIPTION("Flower classifier");
2545MODULE_LICENSE("GPL v2");