   1/*
   2 * net/sched/cls_flower.c		Flower classifier
   3 *
   4 * Copyright (c) 2015 Jiri Pirko <jiri@resnulli.us>
   5 *
   6 * This program is free software; you can redistribute it and/or modify
   7 * it under the terms of the GNU General Public License as published by
   8 * the Free Software Foundation; either version 2 of the License, or
   9 * (at your option) any later version.
  10 */
  11
  12#include <linux/kernel.h>
  13#include <linux/init.h>
  14#include <linux/module.h>
  15#include <linux/rhashtable.h>
  16#include <linux/workqueue.h>
  17
  18#include <linux/if_ether.h>
  19#include <linux/in6.h>
  20#include <linux/ip.h>
  21#include <linux/mpls.h>
  22
  23#include <net/sch_generic.h>
  24#include <net/pkt_cls.h>
  25#include <net/ip.h>
  26#include <net/flow_dissector.h>
  27
  28#include <net/dst.h>
  29#include <net/dst_metadata.h>
  30
  31struct fl_flow_key {
  32	int	indev_ifindex;
  33	struct flow_dissector_key_control control;
  34	struct flow_dissector_key_control enc_control;
  35	struct flow_dissector_key_basic basic;
  36	struct flow_dissector_key_eth_addrs eth;
  37	struct flow_dissector_key_vlan vlan;
  38	union {
  39		struct flow_dissector_key_ipv4_addrs ipv4;
  40		struct flow_dissector_key_ipv6_addrs ipv6;
  41	};
  42	struct flow_dissector_key_ports tp;
  43	struct flow_dissector_key_icmp icmp;
  44	struct flow_dissector_key_arp arp;
  45	struct flow_dissector_key_keyid enc_key_id;
  46	union {
  47		struct flow_dissector_key_ipv4_addrs enc_ipv4;
  48		struct flow_dissector_key_ipv6_addrs enc_ipv6;
  49	};
  50	struct flow_dissector_key_ports enc_tp;
  51	struct flow_dissector_key_mpls mpls;
  52	struct flow_dissector_key_tcp tcp;
  53	struct flow_dissector_key_ip ip;
  54} __aligned(BITS_PER_LONG / 8); /* Ensure that we can do comparisons as longs. */
  55
  56struct fl_flow_mask_range {
  57	unsigned short int start;
  58	unsigned short int end;
  59};
  60
  61struct fl_flow_mask {
  62	struct fl_flow_key key;
  63	struct fl_flow_mask_range range;
  64	struct rcu_head	rcu;
  65};
  66
  67struct cls_fl_head {
  68	struct rhashtable ht;
  69	struct fl_flow_mask mask;
  70	struct flow_dissector dissector;
  71	bool mask_assigned;
  72	struct list_head filters;
  73	struct rhashtable_params ht_params;
  74	union {
  75		struct work_struct work;
  76		struct rcu_head	rcu;
  77	};
  78	struct idr handle_idr;
  79};
  80
  81struct cls_fl_filter {
  82	struct rhash_head ht_node;
  83	struct fl_flow_key mkey;
  84	struct tcf_exts exts;
  85	struct tcf_result res;
  86	struct fl_flow_key key;
  87	struct list_head list;
  88	u32 handle;
  89	u32 flags;
  90	union {
  91		struct work_struct work;
  92		struct rcu_head	rcu;
  93	};
  94	struct net_device *hw_dev;
  95};
  96
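The four structures above carry flower's whole matching scheme: each filter stores its user-supplied key and a pre-masked copy (mkey), the classifier instance keeps one shared mask, and mkey is what goes into the rhashtable. A packet matches a filter when the packet's key AND-ed with the mask equals that filter's mkey. A minimal userspace sketch of the idea, with a hypothetical two-word key (the toy_* names are illustrative, not from this file):

#include <stdbool.h>
#include <stddef.h>
#include <string.h>

struct toy_key { unsigned long w[2]; };

/* AND a key with the mask, word by word (what fl_set_masked_key() does). */
static void toy_mask_key(struct toy_key *mkey, const struct toy_key *key,
			 const struct toy_key *mask)
{
	size_t i;

	for (i = 0; i < 2; i++)
		mkey->w[i] = key->w[i] & mask->w[i];
}

/* A packet matches when its masked key equals the filter's stored mkey. */
static bool toy_match(const struct toy_key *pkt_key,
		      const struct toy_key *filter_mkey,
		      const struct toy_key *mask)
{
	struct toy_key pkt_mkey;

	toy_mask_key(&pkt_mkey, pkt_key, mask);
	return memcmp(&pkt_mkey, filter_mkey, sizeof(pkt_mkey)) == 0;
}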
  97static unsigned short int fl_mask_range(const struct fl_flow_mask *mask)
  98{
  99	return mask->range.end - mask->range.start;
 100}
 101
 102static void fl_mask_update_range(struct fl_flow_mask *mask)
 103{
 104	const u8 *bytes = (const u8 *) &mask->key;
 105	size_t size = sizeof(mask->key);
 106	size_t i, first = 0, last = size - 1;
 107
 108	for (i = 0; i < sizeof(mask->key); i++) {
 109		if (bytes[i]) {
 110			if (!first && i)
 111				first = i;
 112			last = i;
 113		}
 114	}
 115	mask->range.start = rounddown(first, sizeof(long));
 116	mask->range.end = roundup(last + 1, sizeof(long));
 117}
 118
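fl_mask_update_range() narrows all later hashing and comparing to the bytes the mask can actually influence: it records the first and last non-zero byte of the mask, then rounds the range down and up to sizeof(long) boundaries so that the per-long loop in fl_set_masked_key() and the rhashtable compare stay inside whole words. A userspace sketch of the same arithmetic, with hypothetical values:

#include <stddef.h>
#include <stdio.h>
#include <string.h>

static void sketch_range(const unsigned char *mask, size_t size,
			 size_t *start, size_t *end)
{
	size_t i, first = 0, last = size - 1;

	for (i = 0; i < size; i++) {
		if (mask[i]) {
			if (!first && i)
				first = i;
			last = i;
		}
	}
	*start = first / sizeof(long) * sizeof(long);	/* rounddown(first, sizeof(long)) */
	*end = (last + sizeof(long)) / sizeof(long) * sizeof(long); /* roundup(last + 1, sizeof(long)) */
}

int main(void)
{
	unsigned char mask[64] = { 0 };
	size_t start, end;

	memset(&mask[14], 0xff, 6);	/* pretend only a MAC-sized field is masked */
	sketch_range(mask, sizeof(mask), &start, &end);
	printf("active range: [%zu, %zu)\n", start, end);	/* [8, 24) on LP64 */
	return 0;
}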
 119static void *fl_key_get_start(struct fl_flow_key *key,
 120			      const struct fl_flow_mask *mask)
 121{
 122	return (u8 *) key + mask->range.start;
 123}
 124
 125static void fl_set_masked_key(struct fl_flow_key *mkey, struct fl_flow_key *key,
 126			      struct fl_flow_mask *mask)
 127{
 128	const long *lkey = fl_key_get_start(key, mask);
 129	const long *lmask = fl_key_get_start(&mask->key, mask);
 130	long *lmkey = fl_key_get_start(mkey, mask);
 131	int i;
 132
 133	for (i = 0; i < fl_mask_range(mask); i += sizeof(long))
 134		*lmkey++ = *lkey++ & *lmask++;
 135}
 136
 137static void fl_clear_masked_range(struct fl_flow_key *key,
 138				  struct fl_flow_mask *mask)
 139{
 140	memset(fl_key_get_start(key, mask), 0, fl_mask_range(mask));
 141}
 142
 143static struct cls_fl_filter *fl_lookup(struct cls_fl_head *head,
 144				       struct fl_flow_key *mkey)
 145{
 146	return rhashtable_lookup_fast(&head->ht,
 147				      fl_key_get_start(mkey, &head->mask),
 148				      head->ht_params);
 149}
 150
 151static int fl_classify(struct sk_buff *skb, const struct tcf_proto *tp,
 152		       struct tcf_result *res)
 153{
 154	struct cls_fl_head *head = rcu_dereference_bh(tp->root);
 155	struct cls_fl_filter *f;
 156	struct fl_flow_key skb_key;
 157	struct fl_flow_key skb_mkey;
 158
 159	if (!atomic_read(&head->ht.nelems))
 160		return -1;
 161
 162	fl_clear_masked_range(&skb_key, &head->mask);
 163
 164	skb_key.indev_ifindex = skb->skb_iif;
  165	/* skb_flow_dissect() does not set n_proto in case of an unknown
  166	 * protocol, so set it here instead.
  167	 */
 168	skb_key.basic.n_proto = skb->protocol;
 169	skb_flow_dissect_tunnel_info(skb, &head->dissector, &skb_key);
 170	skb_flow_dissect(skb, &head->dissector, &skb_key, 0);
 171
 172	fl_set_masked_key(&skb_mkey, &skb_key, &head->mask);
 173
 174	f = fl_lookup(head, &skb_mkey);
 175	if (f && !tc_skip_sw(f->flags)) {
 176		*res = f->res;
 177		return tcf_exts_exec(skb, &f->exts, res);
 178	}
 179	return -1;
 180}
 181
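fl_classify() is the per-packet fast path: it zeroes only the active range of an on-stack key, fills it from the skb via the flow dissector (plus tunnel metadata and the ingress ifindex), masks it, and does a single rhashtable lookup. Returning -1 tells the caller that this classifier made no decision, so the next filter in the chain is tried; filters flagged skip_sw are deliberately ignored here because they are meant to match in hardware only.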
 182static int fl_init(struct tcf_proto *tp)
 183{
 184	struct cls_fl_head *head;
 185
 186	head = kzalloc(sizeof(*head), GFP_KERNEL);
 187	if (!head)
 188		return -ENOBUFS;
 189
 190	INIT_LIST_HEAD_RCU(&head->filters);
 191	rcu_assign_pointer(tp->root, head);
 192	idr_init(&head->handle_idr);
 193
 194	return 0;
 195}
 196
 197static void __fl_destroy_filter(struct cls_fl_filter *f)
 198{
 199	tcf_exts_destroy(&f->exts);
 200	tcf_exts_put_net(&f->exts);
 201	kfree(f);
 202}
 203
 204static void fl_destroy_filter_work(struct work_struct *work)
 205{
 206	struct cls_fl_filter *f = container_of(work, struct cls_fl_filter, work);
 207
 208	rtnl_lock();
 209	__fl_destroy_filter(f);
 210	rtnl_unlock();
 211}
 212
 213static void fl_destroy_filter(struct rcu_head *head)
 214{
 215	struct cls_fl_filter *f = container_of(head, struct cls_fl_filter, rcu);
 216
 217	INIT_WORK(&f->work, fl_destroy_filter_work);
 218	tcf_queue_work(&f->work);
 219}
 220
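Filter teardown is two-staged: call_rcu() waits out a grace period so no reader can still be walking the filter, and the RCU callback then reuses the same memory as a work item (note the union of rcu and work in cls_fl_filter) so that the final cleanup, which takes rtnl_lock(), runs in sleepable process context rather than from the RCU callback itself.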
 221static void fl_hw_destroy_filter(struct tcf_proto *tp, struct cls_fl_filter *f,
 222				 struct netlink_ext_ack *extack)
 223{
 224	struct tc_cls_flower_offload cls_flower = {};
 225	struct tcf_block *block = tp->chain->block;
 226
 227	tc_cls_common_offload_init(&cls_flower.common, tp, f->flags, extack);
 228	cls_flower.command = TC_CLSFLOWER_DESTROY;
 229	cls_flower.cookie = (unsigned long) f;
 230
 231	tc_setup_cb_call(block, &f->exts, TC_SETUP_CLSFLOWER,
 232			 &cls_flower, false);
 233	tcf_block_offload_dec(block, &f->flags);
 234}
 235
 236static int fl_hw_replace_filter(struct tcf_proto *tp,
 237				struct flow_dissector *dissector,
 238				struct fl_flow_key *mask,
 239				struct cls_fl_filter *f,
 240				struct netlink_ext_ack *extack)
 241{
 242	struct tc_cls_flower_offload cls_flower = {};
 243	struct tcf_block *block = tp->chain->block;
 244	bool skip_sw = tc_skip_sw(f->flags);
 245	int err;
 246
 247	tc_cls_common_offload_init(&cls_flower.common, tp, f->flags, extack);
 248	cls_flower.command = TC_CLSFLOWER_REPLACE;
 249	cls_flower.cookie = (unsigned long) f;
 250	cls_flower.dissector = dissector;
 251	cls_flower.mask = mask;
 252	cls_flower.key = &f->mkey;
 253	cls_flower.exts = &f->exts;
 254	cls_flower.classid = f->res.classid;
 255
 256	err = tc_setup_cb_call(block, &f->exts, TC_SETUP_CLSFLOWER,
 257			       &cls_flower, skip_sw);
 258	if (err < 0) {
 259		fl_hw_destroy_filter(tp, f, NULL);
 260		return err;
 261	} else if (err > 0) {
 262		tcf_block_offload_inc(block, &f->flags);
 263	}
 264
 265	if (skip_sw && !(f->flags & TCA_CLS_FLAGS_IN_HW))
 266		return -EINVAL;
 267
 268	return 0;
 269}
 270
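fl_hw_replace_filter() offers the rule to every callback registered on the qdisc's block, typically the driver of the attached device. A negative return aborts the change and undoes any partial hardware state via fl_hw_destroy_filter(); a positive return means at least one callback accepted the rule, so the block's offload count and the filter's in-hardware flag are updated. If the user asked for skip_sw but TCA_CLS_FLAGS_IN_HW never got set, the replace fails with -EINVAL, since such a filter would otherwise match nowhere.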
 271static void fl_hw_update_stats(struct tcf_proto *tp, struct cls_fl_filter *f)
 272{
 273	struct tc_cls_flower_offload cls_flower = {};
 274	struct tcf_block *block = tp->chain->block;
 275
 276	tc_cls_common_offload_init(&cls_flower.common, tp, f->flags, NULL);
 277	cls_flower.command = TC_CLSFLOWER_STATS;
 278	cls_flower.cookie = (unsigned long) f;
 279	cls_flower.exts = &f->exts;
 280	cls_flower.classid = f->res.classid;
 281
 282	tc_setup_cb_call(block, &f->exts, TC_SETUP_CLSFLOWER,
 283			 &cls_flower, false);
 284}
 285
 286static void __fl_delete(struct tcf_proto *tp, struct cls_fl_filter *f,
 287			struct netlink_ext_ack *extack)
 288{
 289	struct cls_fl_head *head = rtnl_dereference(tp->root);
 290
 291	idr_remove(&head->handle_idr, f->handle);
 292	list_del_rcu(&f->list);
 293	if (!tc_skip_hw(f->flags))
 294		fl_hw_destroy_filter(tp, f, extack);
 295	tcf_unbind_filter(tp, &f->res);
 296	if (tcf_exts_get_net(&f->exts))
 297		call_rcu(&f->rcu, fl_destroy_filter);
 298	else
 299		__fl_destroy_filter(f);
 300}
 301
 302static void fl_destroy_sleepable(struct work_struct *work)
 303{
 304	struct cls_fl_head *head = container_of(work, struct cls_fl_head,
 305						work);
 306	if (head->mask_assigned)
 307		rhashtable_destroy(&head->ht);
 308	kfree(head);
 309	module_put(THIS_MODULE);
 310}
 311
 312static void fl_destroy_rcu(struct rcu_head *rcu)
 313{
 314	struct cls_fl_head *head = container_of(rcu, struct cls_fl_head, rcu);
 315
 316	INIT_WORK(&head->work, fl_destroy_sleepable);
 317	schedule_work(&head->work);
 318}
 319
 320static void fl_destroy(struct tcf_proto *tp, struct netlink_ext_ack *extack)
 321{
 322	struct cls_fl_head *head = rtnl_dereference(tp->root);
 323	struct cls_fl_filter *f, *next;
 324
 325	list_for_each_entry_safe(f, next, &head->filters, list)
 326		__fl_delete(tp, f, extack);
 327	idr_destroy(&head->handle_idr);
 328
 329	__module_get(THIS_MODULE);
 330	call_rcu(&head->rcu, fl_destroy_rcu);
 331}
 332
 333static void *fl_get(struct tcf_proto *tp, u32 handle)
 334{
 335	struct cls_fl_head *head = rtnl_dereference(tp->root);
 336
 337	return idr_find(&head->handle_idr, handle);
 338}
 339
 340static const struct nla_policy fl_policy[TCA_FLOWER_MAX + 1] = {
 341	[TCA_FLOWER_UNSPEC]		= { .type = NLA_UNSPEC },
 342	[TCA_FLOWER_CLASSID]		= { .type = NLA_U32 },
 343	[TCA_FLOWER_INDEV]		= { .type = NLA_STRING,
 344					    .len = IFNAMSIZ },
 345	[TCA_FLOWER_KEY_ETH_DST]	= { .len = ETH_ALEN },
 346	[TCA_FLOWER_KEY_ETH_DST_MASK]	= { .len = ETH_ALEN },
 347	[TCA_FLOWER_KEY_ETH_SRC]	= { .len = ETH_ALEN },
 348	[TCA_FLOWER_KEY_ETH_SRC_MASK]	= { .len = ETH_ALEN },
 349	[TCA_FLOWER_KEY_ETH_TYPE]	= { .type = NLA_U16 },
 350	[TCA_FLOWER_KEY_IP_PROTO]	= { .type = NLA_U8 },
 351	[TCA_FLOWER_KEY_IPV4_SRC]	= { .type = NLA_U32 },
 352	[TCA_FLOWER_KEY_IPV4_SRC_MASK]	= { .type = NLA_U32 },
 353	[TCA_FLOWER_KEY_IPV4_DST]	= { .type = NLA_U32 },
 354	[TCA_FLOWER_KEY_IPV4_DST_MASK]	= { .type = NLA_U32 },
 355	[TCA_FLOWER_KEY_IPV6_SRC]	= { .len = sizeof(struct in6_addr) },
 356	[TCA_FLOWER_KEY_IPV6_SRC_MASK]	= { .len = sizeof(struct in6_addr) },
 357	[TCA_FLOWER_KEY_IPV6_DST]	= { .len = sizeof(struct in6_addr) },
 358	[TCA_FLOWER_KEY_IPV6_DST_MASK]	= { .len = sizeof(struct in6_addr) },
 359	[TCA_FLOWER_KEY_TCP_SRC]	= { .type = NLA_U16 },
 360	[TCA_FLOWER_KEY_TCP_DST]	= { .type = NLA_U16 },
 361	[TCA_FLOWER_KEY_UDP_SRC]	= { .type = NLA_U16 },
 362	[TCA_FLOWER_KEY_UDP_DST]	= { .type = NLA_U16 },
 363	[TCA_FLOWER_KEY_VLAN_ID]	= { .type = NLA_U16 },
 364	[TCA_FLOWER_KEY_VLAN_PRIO]	= { .type = NLA_U8 },
 365	[TCA_FLOWER_KEY_VLAN_ETH_TYPE]	= { .type = NLA_U16 },
 366	[TCA_FLOWER_KEY_ENC_KEY_ID]	= { .type = NLA_U32 },
 367	[TCA_FLOWER_KEY_ENC_IPV4_SRC]	= { .type = NLA_U32 },
 368	[TCA_FLOWER_KEY_ENC_IPV4_SRC_MASK] = { .type = NLA_U32 },
 369	[TCA_FLOWER_KEY_ENC_IPV4_DST]	= { .type = NLA_U32 },
 370	[TCA_FLOWER_KEY_ENC_IPV4_DST_MASK] = { .type = NLA_U32 },
 371	[TCA_FLOWER_KEY_ENC_IPV6_SRC]	= { .len = sizeof(struct in6_addr) },
 372	[TCA_FLOWER_KEY_ENC_IPV6_SRC_MASK] = { .len = sizeof(struct in6_addr) },
 373	[TCA_FLOWER_KEY_ENC_IPV6_DST]	= { .len = sizeof(struct in6_addr) },
 374	[TCA_FLOWER_KEY_ENC_IPV6_DST_MASK] = { .len = sizeof(struct in6_addr) },
 375	[TCA_FLOWER_KEY_TCP_SRC_MASK]	= { .type = NLA_U16 },
 376	[TCA_FLOWER_KEY_TCP_DST_MASK]	= { .type = NLA_U16 },
 377	[TCA_FLOWER_KEY_UDP_SRC_MASK]	= { .type = NLA_U16 },
 378	[TCA_FLOWER_KEY_UDP_DST_MASK]	= { .type = NLA_U16 },
 379	[TCA_FLOWER_KEY_SCTP_SRC_MASK]	= { .type = NLA_U16 },
 380	[TCA_FLOWER_KEY_SCTP_DST_MASK]	= { .type = NLA_U16 },
 381	[TCA_FLOWER_KEY_SCTP_SRC]	= { .type = NLA_U16 },
 382	[TCA_FLOWER_KEY_SCTP_DST]	= { .type = NLA_U16 },
 383	[TCA_FLOWER_KEY_ENC_UDP_SRC_PORT]	= { .type = NLA_U16 },
 384	[TCA_FLOWER_KEY_ENC_UDP_SRC_PORT_MASK]	= { .type = NLA_U16 },
 385	[TCA_FLOWER_KEY_ENC_UDP_DST_PORT]	= { .type = NLA_U16 },
 386	[TCA_FLOWER_KEY_ENC_UDP_DST_PORT_MASK]	= { .type = NLA_U16 },
 387	[TCA_FLOWER_KEY_FLAGS]		= { .type = NLA_U32 },
 388	[TCA_FLOWER_KEY_FLAGS_MASK]	= { .type = NLA_U32 },
 389	[TCA_FLOWER_KEY_ICMPV4_TYPE]	= { .type = NLA_U8 },
 390	[TCA_FLOWER_KEY_ICMPV4_TYPE_MASK] = { .type = NLA_U8 },
 391	[TCA_FLOWER_KEY_ICMPV4_CODE]	= { .type = NLA_U8 },
 392	[TCA_FLOWER_KEY_ICMPV4_CODE_MASK] = { .type = NLA_U8 },
 393	[TCA_FLOWER_KEY_ICMPV6_TYPE]	= { .type = NLA_U8 },
 394	[TCA_FLOWER_KEY_ICMPV6_TYPE_MASK] = { .type = NLA_U8 },
 395	[TCA_FLOWER_KEY_ICMPV6_CODE]	= { .type = NLA_U8 },
 396	[TCA_FLOWER_KEY_ICMPV6_CODE_MASK] = { .type = NLA_U8 },
 397	[TCA_FLOWER_KEY_ARP_SIP]	= { .type = NLA_U32 },
 398	[TCA_FLOWER_KEY_ARP_SIP_MASK]	= { .type = NLA_U32 },
 399	[TCA_FLOWER_KEY_ARP_TIP]	= { .type = NLA_U32 },
 400	[TCA_FLOWER_KEY_ARP_TIP_MASK]	= { .type = NLA_U32 },
 401	[TCA_FLOWER_KEY_ARP_OP]		= { .type = NLA_U8 },
 402	[TCA_FLOWER_KEY_ARP_OP_MASK]	= { .type = NLA_U8 },
 403	[TCA_FLOWER_KEY_ARP_SHA]	= { .len = ETH_ALEN },
 404	[TCA_FLOWER_KEY_ARP_SHA_MASK]	= { .len = ETH_ALEN },
 405	[TCA_FLOWER_KEY_ARP_THA]	= { .len = ETH_ALEN },
 406	[TCA_FLOWER_KEY_ARP_THA_MASK]	= { .len = ETH_ALEN },
 407	[TCA_FLOWER_KEY_MPLS_TTL]	= { .type = NLA_U8 },
 408	[TCA_FLOWER_KEY_MPLS_BOS]	= { .type = NLA_U8 },
 409	[TCA_FLOWER_KEY_MPLS_TC]	= { .type = NLA_U8 },
 410	[TCA_FLOWER_KEY_MPLS_LABEL]	= { .type = NLA_U32 },
 411	[TCA_FLOWER_KEY_TCP_FLAGS]	= { .type = NLA_U16 },
 412	[TCA_FLOWER_KEY_TCP_FLAGS_MASK]	= { .type = NLA_U16 },
 413	[TCA_FLOWER_KEY_IP_TOS]		= { .type = NLA_U8 },
 414	[TCA_FLOWER_KEY_IP_TOS_MASK]	= { .type = NLA_U8 },
 415	[TCA_FLOWER_KEY_IP_TTL]		= { .type = NLA_U8 },
 416	[TCA_FLOWER_KEY_IP_TTL_MASK]	= { .type = NLA_U8 },
 417};
 418
 419static void fl_set_key_val(struct nlattr **tb,
 420			   void *val, int val_type,
 421			   void *mask, int mask_type, int len)
 422{
 423	if (!tb[val_type])
 424		return;
 425	memcpy(val, nla_data(tb[val_type]), len);
 426	if (mask_type == TCA_FLOWER_UNSPEC || !tb[mask_type])
 427		memset(mask, 0xff, len);
 428	else
 429		memcpy(mask, nla_data(tb[mask_type]), len);
 430}
 431
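fl_set_key_val() implements the uniform value/mask convention used for nearly every field: the value attribute is copied as-is, and a missing mask attribute (or a field that has no mask attribute at all, signalled by TCA_FLOWER_UNSPEC) means "match exactly", i.e. an all-ones mask. A plain-pointer userspace sketch of that rule (the name and the simplified attribute handling are assumptions made for illustration):

#include <stddef.h>
#include <string.h>

/* attr_mask == NULL models "mask attribute not supplied by the user". */
static void sketch_set_val(void *val, const void *attr_val,
			   void *mask, const void *attr_mask, size_t len)
{
	memcpy(val, attr_val, len);
	if (!attr_mask)
		memset(mask, 0xff, len);	/* exact match on every bit */
	else
		memcpy(mask, attr_mask, len);
}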
 432static int fl_set_key_mpls(struct nlattr **tb,
 433			   struct flow_dissector_key_mpls *key_val,
 434			   struct flow_dissector_key_mpls *key_mask)
 435{
 436	if (tb[TCA_FLOWER_KEY_MPLS_TTL]) {
 437		key_val->mpls_ttl = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_TTL]);
 438		key_mask->mpls_ttl = MPLS_TTL_MASK;
 439	}
 440	if (tb[TCA_FLOWER_KEY_MPLS_BOS]) {
 441		u8 bos = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_BOS]);
 442
 443		if (bos & ~MPLS_BOS_MASK)
 444			return -EINVAL;
 445		key_val->mpls_bos = bos;
 446		key_mask->mpls_bos = MPLS_BOS_MASK;
 447	}
 448	if (tb[TCA_FLOWER_KEY_MPLS_TC]) {
 449		u8 tc = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_TC]);
 450
 451		if (tc & ~MPLS_TC_MASK)
 452			return -EINVAL;
 453		key_val->mpls_tc = tc;
 454		key_mask->mpls_tc = MPLS_TC_MASK;
 455	}
 456	if (tb[TCA_FLOWER_KEY_MPLS_LABEL]) {
 457		u32 label = nla_get_u32(tb[TCA_FLOWER_KEY_MPLS_LABEL]);
 458
 459		if (label & ~MPLS_LABEL_MASK)
 460			return -EINVAL;
 461		key_val->mpls_label = label;
 462		key_mask->mpls_label = MPLS_LABEL_MASK;
 463	}
 464	return 0;
 465}
 466
 467static void fl_set_key_vlan(struct nlattr **tb,
 468			    struct flow_dissector_key_vlan *key_val,
 469			    struct flow_dissector_key_vlan *key_mask)
 470{
 471#define VLAN_PRIORITY_MASK	0x7
 472
 473	if (tb[TCA_FLOWER_KEY_VLAN_ID]) {
 474		key_val->vlan_id =
 475			nla_get_u16(tb[TCA_FLOWER_KEY_VLAN_ID]) & VLAN_VID_MASK;
 476		key_mask->vlan_id = VLAN_VID_MASK;
 477	}
 478	if (tb[TCA_FLOWER_KEY_VLAN_PRIO]) {
 479		key_val->vlan_priority =
 480			nla_get_u8(tb[TCA_FLOWER_KEY_VLAN_PRIO]) &
 481			VLAN_PRIORITY_MASK;
 482		key_mask->vlan_priority = VLAN_PRIORITY_MASK;
 483	}
 484}
 485
 486static void fl_set_key_flag(u32 flower_key, u32 flower_mask,
 487			    u32 *dissector_key, u32 *dissector_mask,
 488			    u32 flower_flag_bit, u32 dissector_flag_bit)
 489{
 490	if (flower_mask & flower_flag_bit) {
 491		*dissector_mask |= dissector_flag_bit;
 492		if (flower_key & flower_flag_bit)
 493			*dissector_key |= dissector_flag_bit;
 494	}
 495}
 496
 497static int fl_set_key_flags(struct nlattr **tb,
 498			    u32 *flags_key, u32 *flags_mask)
 499{
 500	u32 key, mask;
 501
 502	/* mask is mandatory for flags */
 503	if (!tb[TCA_FLOWER_KEY_FLAGS_MASK])
 504		return -EINVAL;
 505
 506	key = be32_to_cpu(nla_get_u32(tb[TCA_FLOWER_KEY_FLAGS]));
 507	mask = be32_to_cpu(nla_get_u32(tb[TCA_FLOWER_KEY_FLAGS_MASK]));
 508
 509	*flags_key  = 0;
 510	*flags_mask = 0;
 511
 512	fl_set_key_flag(key, mask, flags_key, flags_mask,
 513			TCA_FLOWER_KEY_FLAGS_IS_FRAGMENT, FLOW_DIS_IS_FRAGMENT);
 514	fl_set_key_flag(key, mask, flags_key, flags_mask,
 515			TCA_FLOWER_KEY_FLAGS_FRAG_IS_FIRST,
 516			FLOW_DIS_FIRST_FRAG);
 517
 518	return 0;
 519}
 520
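The packet-flags key is the one place where the mask attribute is mandatory, and both key and mask arrive as 32-bit big-endian values: they are converted with be32_to_cpu() and then translated bit by bit from the TCA_FLOWER_KEY_FLAGS_* namespace into the flow dissector's FLOW_DIS_* bits, currently just "is a fragment" and "is the first fragment".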
 521static void fl_set_key_ip(struct nlattr **tb,
 522			  struct flow_dissector_key_ip *key,
 523			  struct flow_dissector_key_ip *mask)
 524{
 525		fl_set_key_val(tb, &key->tos, TCA_FLOWER_KEY_IP_TOS,
 526			       &mask->tos, TCA_FLOWER_KEY_IP_TOS_MASK,
 527			       sizeof(key->tos));
 528
 529		fl_set_key_val(tb, &key->ttl, TCA_FLOWER_KEY_IP_TTL,
 530			       &mask->ttl, TCA_FLOWER_KEY_IP_TTL_MASK,
 531			       sizeof(key->ttl));
 532}
 533
 534static int fl_set_key(struct net *net, struct nlattr **tb,
 535		      struct fl_flow_key *key, struct fl_flow_key *mask,
 536		      struct netlink_ext_ack *extack)
 537{
 538	__be16 ethertype;
 539	int ret = 0;
 540#ifdef CONFIG_NET_CLS_IND
 541	if (tb[TCA_FLOWER_INDEV]) {
 542		int err = tcf_change_indev(net, tb[TCA_FLOWER_INDEV], extack);
 543		if (err < 0)
 544			return err;
 545		key->indev_ifindex = err;
 546		mask->indev_ifindex = 0xffffffff;
 547	}
 548#endif
 549
 550	fl_set_key_val(tb, key->eth.dst, TCA_FLOWER_KEY_ETH_DST,
 551		       mask->eth.dst, TCA_FLOWER_KEY_ETH_DST_MASK,
 552		       sizeof(key->eth.dst));
 553	fl_set_key_val(tb, key->eth.src, TCA_FLOWER_KEY_ETH_SRC,
 554		       mask->eth.src, TCA_FLOWER_KEY_ETH_SRC_MASK,
 555		       sizeof(key->eth.src));
 556
 557	if (tb[TCA_FLOWER_KEY_ETH_TYPE]) {
 558		ethertype = nla_get_be16(tb[TCA_FLOWER_KEY_ETH_TYPE]);
 559
 560		if (ethertype == htons(ETH_P_8021Q)) {
 561			fl_set_key_vlan(tb, &key->vlan, &mask->vlan);
 562			fl_set_key_val(tb, &key->basic.n_proto,
 563				       TCA_FLOWER_KEY_VLAN_ETH_TYPE,
 564				       &mask->basic.n_proto, TCA_FLOWER_UNSPEC,
 565				       sizeof(key->basic.n_proto));
 566		} else {
 567			key->basic.n_proto = ethertype;
 568			mask->basic.n_proto = cpu_to_be16(~0);
 569		}
 570	}
 571
 572	if (key->basic.n_proto == htons(ETH_P_IP) ||
 573	    key->basic.n_proto == htons(ETH_P_IPV6)) {
 574		fl_set_key_val(tb, &key->basic.ip_proto, TCA_FLOWER_KEY_IP_PROTO,
 575			       &mask->basic.ip_proto, TCA_FLOWER_UNSPEC,
 576			       sizeof(key->basic.ip_proto));
 577		fl_set_key_ip(tb, &key->ip, &mask->ip);
 578	}
 579
 580	if (tb[TCA_FLOWER_KEY_IPV4_SRC] || tb[TCA_FLOWER_KEY_IPV4_DST]) {
 581		key->control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
 582		mask->control.addr_type = ~0;
 583		fl_set_key_val(tb, &key->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC,
 584			       &mask->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC_MASK,
 585			       sizeof(key->ipv4.src));
 586		fl_set_key_val(tb, &key->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST,
 587			       &mask->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST_MASK,
 588			       sizeof(key->ipv4.dst));
 589	} else if (tb[TCA_FLOWER_KEY_IPV6_SRC] || tb[TCA_FLOWER_KEY_IPV6_DST]) {
 590		key->control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
 591		mask->control.addr_type = ~0;
 592		fl_set_key_val(tb, &key->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC,
 593			       &mask->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC_MASK,
 594			       sizeof(key->ipv6.src));
 595		fl_set_key_val(tb, &key->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST,
 596			       &mask->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST_MASK,
 597			       sizeof(key->ipv6.dst));
 598	}
 599
 600	if (key->basic.ip_proto == IPPROTO_TCP) {
 601		fl_set_key_val(tb, &key->tp.src, TCA_FLOWER_KEY_TCP_SRC,
 602			       &mask->tp.src, TCA_FLOWER_KEY_TCP_SRC_MASK,
 603			       sizeof(key->tp.src));
 604		fl_set_key_val(tb, &key->tp.dst, TCA_FLOWER_KEY_TCP_DST,
 605			       &mask->tp.dst, TCA_FLOWER_KEY_TCP_DST_MASK,
 606			       sizeof(key->tp.dst));
 607		fl_set_key_val(tb, &key->tcp.flags, TCA_FLOWER_KEY_TCP_FLAGS,
 608			       &mask->tcp.flags, TCA_FLOWER_KEY_TCP_FLAGS_MASK,
 609			       sizeof(key->tcp.flags));
 610	} else if (key->basic.ip_proto == IPPROTO_UDP) {
 611		fl_set_key_val(tb, &key->tp.src, TCA_FLOWER_KEY_UDP_SRC,
 612			       &mask->tp.src, TCA_FLOWER_KEY_UDP_SRC_MASK,
 613			       sizeof(key->tp.src));
 614		fl_set_key_val(tb, &key->tp.dst, TCA_FLOWER_KEY_UDP_DST,
 615			       &mask->tp.dst, TCA_FLOWER_KEY_UDP_DST_MASK,
 616			       sizeof(key->tp.dst));
 617	} else if (key->basic.ip_proto == IPPROTO_SCTP) {
 618		fl_set_key_val(tb, &key->tp.src, TCA_FLOWER_KEY_SCTP_SRC,
 619			       &mask->tp.src, TCA_FLOWER_KEY_SCTP_SRC_MASK,
 620			       sizeof(key->tp.src));
 621		fl_set_key_val(tb, &key->tp.dst, TCA_FLOWER_KEY_SCTP_DST,
 622			       &mask->tp.dst, TCA_FLOWER_KEY_SCTP_DST_MASK,
 623			       sizeof(key->tp.dst));
 624	} else if (key->basic.n_proto == htons(ETH_P_IP) &&
 625		   key->basic.ip_proto == IPPROTO_ICMP) {
 626		fl_set_key_val(tb, &key->icmp.type, TCA_FLOWER_KEY_ICMPV4_TYPE,
 627			       &mask->icmp.type,
 628			       TCA_FLOWER_KEY_ICMPV4_TYPE_MASK,
 629			       sizeof(key->icmp.type));
 630		fl_set_key_val(tb, &key->icmp.code, TCA_FLOWER_KEY_ICMPV4_CODE,
 631			       &mask->icmp.code,
 632			       TCA_FLOWER_KEY_ICMPV4_CODE_MASK,
 633			       sizeof(key->icmp.code));
 634	} else if (key->basic.n_proto == htons(ETH_P_IPV6) &&
 635		   key->basic.ip_proto == IPPROTO_ICMPV6) {
 636		fl_set_key_val(tb, &key->icmp.type, TCA_FLOWER_KEY_ICMPV6_TYPE,
 637			       &mask->icmp.type,
 638			       TCA_FLOWER_KEY_ICMPV6_TYPE_MASK,
 639			       sizeof(key->icmp.type));
 640		fl_set_key_val(tb, &key->icmp.code, TCA_FLOWER_KEY_ICMPV6_CODE,
 641			       &mask->icmp.code,
 642			       TCA_FLOWER_KEY_ICMPV6_CODE_MASK,
 643			       sizeof(key->icmp.code));
 644	} else if (key->basic.n_proto == htons(ETH_P_MPLS_UC) ||
 645		   key->basic.n_proto == htons(ETH_P_MPLS_MC)) {
 646		ret = fl_set_key_mpls(tb, &key->mpls, &mask->mpls);
 647		if (ret)
 648			return ret;
 649	} else if (key->basic.n_proto == htons(ETH_P_ARP) ||
 650		   key->basic.n_proto == htons(ETH_P_RARP)) {
 651		fl_set_key_val(tb, &key->arp.sip, TCA_FLOWER_KEY_ARP_SIP,
 652			       &mask->arp.sip, TCA_FLOWER_KEY_ARP_SIP_MASK,
 653			       sizeof(key->arp.sip));
 654		fl_set_key_val(tb, &key->arp.tip, TCA_FLOWER_KEY_ARP_TIP,
 655			       &mask->arp.tip, TCA_FLOWER_KEY_ARP_TIP_MASK,
 656			       sizeof(key->arp.tip));
 657		fl_set_key_val(tb, &key->arp.op, TCA_FLOWER_KEY_ARP_OP,
 658			       &mask->arp.op, TCA_FLOWER_KEY_ARP_OP_MASK,
 659			       sizeof(key->arp.op));
 660		fl_set_key_val(tb, key->arp.sha, TCA_FLOWER_KEY_ARP_SHA,
 661			       mask->arp.sha, TCA_FLOWER_KEY_ARP_SHA_MASK,
 662			       sizeof(key->arp.sha));
 663		fl_set_key_val(tb, key->arp.tha, TCA_FLOWER_KEY_ARP_THA,
 664			       mask->arp.tha, TCA_FLOWER_KEY_ARP_THA_MASK,
 665			       sizeof(key->arp.tha));
 666	}
 667
 668	if (tb[TCA_FLOWER_KEY_ENC_IPV4_SRC] ||
 669	    tb[TCA_FLOWER_KEY_ENC_IPV4_DST]) {
 670		key->enc_control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
 671		mask->enc_control.addr_type = ~0;
 672		fl_set_key_val(tb, &key->enc_ipv4.src,
 673			       TCA_FLOWER_KEY_ENC_IPV4_SRC,
 674			       &mask->enc_ipv4.src,
 675			       TCA_FLOWER_KEY_ENC_IPV4_SRC_MASK,
 676			       sizeof(key->enc_ipv4.src));
 677		fl_set_key_val(tb, &key->enc_ipv4.dst,
 678			       TCA_FLOWER_KEY_ENC_IPV4_DST,
 679			       &mask->enc_ipv4.dst,
 680			       TCA_FLOWER_KEY_ENC_IPV4_DST_MASK,
 681			       sizeof(key->enc_ipv4.dst));
 682	}
 683
 684	if (tb[TCA_FLOWER_KEY_ENC_IPV6_SRC] ||
 685	    tb[TCA_FLOWER_KEY_ENC_IPV6_DST]) {
 686		key->enc_control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
 687		mask->enc_control.addr_type = ~0;
 688		fl_set_key_val(tb, &key->enc_ipv6.src,
 689			       TCA_FLOWER_KEY_ENC_IPV6_SRC,
 690			       &mask->enc_ipv6.src,
 691			       TCA_FLOWER_KEY_ENC_IPV6_SRC_MASK,
 692			       sizeof(key->enc_ipv6.src));
 693		fl_set_key_val(tb, &key->enc_ipv6.dst,
 694			       TCA_FLOWER_KEY_ENC_IPV6_DST,
 695			       &mask->enc_ipv6.dst,
 696			       TCA_FLOWER_KEY_ENC_IPV6_DST_MASK,
 697			       sizeof(key->enc_ipv6.dst));
 698	}
 699
 700	fl_set_key_val(tb, &key->enc_key_id.keyid, TCA_FLOWER_KEY_ENC_KEY_ID,
 701		       &mask->enc_key_id.keyid, TCA_FLOWER_UNSPEC,
 702		       sizeof(key->enc_key_id.keyid));
 703
 704	fl_set_key_val(tb, &key->enc_tp.src, TCA_FLOWER_KEY_ENC_UDP_SRC_PORT,
 705		       &mask->enc_tp.src, TCA_FLOWER_KEY_ENC_UDP_SRC_PORT_MASK,
 706		       sizeof(key->enc_tp.src));
 707
 708	fl_set_key_val(tb, &key->enc_tp.dst, TCA_FLOWER_KEY_ENC_UDP_DST_PORT,
 709		       &mask->enc_tp.dst, TCA_FLOWER_KEY_ENC_UDP_DST_PORT_MASK,
 710		       sizeof(key->enc_tp.dst));
 711
 712	if (tb[TCA_FLOWER_KEY_FLAGS])
 713		ret = fl_set_key_flags(tb, &key->control.flags, &mask->control.flags);
 714
 715	return ret;
 716}
 717
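fl_set_key() builds key and mask in a single pass, dispatching on what the user supplied: the outer ethertype decides whether the VLAN fields or n_proto are filled in, n_proto then gates the L3 addresses and IP TOS/TTL, ip_proto gates the L4 ports, TCP flags and ICMP type/code, and the ENC_* attributes describe tunnel metadata (outer addresses, key id, UDP ports). Every helper also writes the corresponding mask bits, so after this function the mask fully describes which bits of the key are significant.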
 718static bool fl_mask_eq(struct fl_flow_mask *mask1,
 719		       struct fl_flow_mask *mask2)
 720{
 721	const long *lmask1 = fl_key_get_start(&mask1->key, mask1);
 722	const long *lmask2 = fl_key_get_start(&mask2->key, mask2);
 723
 724	return !memcmp(&mask1->range, &mask2->range, sizeof(mask1->range)) &&
 725	       !memcmp(lmask1, lmask2, fl_mask_range(mask1));
 726}
 727
 728static const struct rhashtable_params fl_ht_params = {
 729	.key_offset = offsetof(struct cls_fl_filter, mkey), /* base offset */
 730	.head_offset = offsetof(struct cls_fl_filter, ht_node),
 731	.automatic_shrinking = true,
 732};
 733
 734static int fl_init_hashtable(struct cls_fl_head *head,
 735			     struct fl_flow_mask *mask)
 736{
 737	head->ht_params = fl_ht_params;
 738	head->ht_params.key_len = fl_mask_range(mask);
 739	head->ht_params.key_offset += mask->range.start;
 740
 741	return rhashtable_init(&head->ht, &head->ht_params);
 742}
 743
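fl_init_hashtable() specializes the generic parameters for the mask in use: key_len becomes the length of the mask's active range and key_offset is moved forward by range.start, so the rhashtable hashes and compares only the slice of mkey that can actually differ between filters. With a hypothetical active range of bytes [8, 24), for example, key_len would be 16 and key_offset would be offsetof(struct cls_fl_filter, mkey) + 8.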
 744#define FL_KEY_MEMBER_OFFSET(member) offsetof(struct fl_flow_key, member)
 745#define FL_KEY_MEMBER_SIZE(member) (sizeof(((struct fl_flow_key *) 0)->member))
 746
 747#define FL_KEY_IS_MASKED(mask, member)						\
 748	memchr_inv(((char *)mask) + FL_KEY_MEMBER_OFFSET(member),		\
 749		   0, FL_KEY_MEMBER_SIZE(member))				\
 750
 751#define FL_KEY_SET(keys, cnt, id, member)					\
 752	do {									\
 753		keys[cnt].key_id = id;						\
 754		keys[cnt].offset = FL_KEY_MEMBER_OFFSET(member);		\
 755		cnt++;								\
 756	} while(0);
 757
 758#define FL_KEY_SET_IF_MASKED(mask, keys, cnt, id, member)			\
 759	do {									\
 760		if (FL_KEY_IS_MASKED(mask, member))				\
 761			FL_KEY_SET(keys, cnt, id, member);			\
 762	} while(0);
 763
 764static void fl_init_dissector(struct cls_fl_head *head,
 765			      struct fl_flow_mask *mask)
 766{
 767	struct flow_dissector_key keys[FLOW_DISSECTOR_KEY_MAX];
 768	size_t cnt = 0;
 769
 770	FL_KEY_SET(keys, cnt, FLOW_DISSECTOR_KEY_CONTROL, control);
 771	FL_KEY_SET(keys, cnt, FLOW_DISSECTOR_KEY_BASIC, basic);
 772	FL_KEY_SET_IF_MASKED(&mask->key, keys, cnt,
 773			     FLOW_DISSECTOR_KEY_ETH_ADDRS, eth);
 774	FL_KEY_SET_IF_MASKED(&mask->key, keys, cnt,
 775			     FLOW_DISSECTOR_KEY_IPV4_ADDRS, ipv4);
 776	FL_KEY_SET_IF_MASKED(&mask->key, keys, cnt,
 777			     FLOW_DISSECTOR_KEY_IPV6_ADDRS, ipv6);
 778	FL_KEY_SET_IF_MASKED(&mask->key, keys, cnt,
 779			     FLOW_DISSECTOR_KEY_PORTS, tp);
 780	FL_KEY_SET_IF_MASKED(&mask->key, keys, cnt,
 781			     FLOW_DISSECTOR_KEY_IP, ip);
 782	FL_KEY_SET_IF_MASKED(&mask->key, keys, cnt,
 783			     FLOW_DISSECTOR_KEY_TCP, tcp);
 784	FL_KEY_SET_IF_MASKED(&mask->key, keys, cnt,
 785			     FLOW_DISSECTOR_KEY_ICMP, icmp);
 786	FL_KEY_SET_IF_MASKED(&mask->key, keys, cnt,
 787			     FLOW_DISSECTOR_KEY_ARP, arp);
 788	FL_KEY_SET_IF_MASKED(&mask->key, keys, cnt,
 789			     FLOW_DISSECTOR_KEY_MPLS, mpls);
 790	FL_KEY_SET_IF_MASKED(&mask->key, keys, cnt,
 791			     FLOW_DISSECTOR_KEY_VLAN, vlan);
 792	FL_KEY_SET_IF_MASKED(&mask->key, keys, cnt,
 793			     FLOW_DISSECTOR_KEY_ENC_KEYID, enc_key_id);
 794	FL_KEY_SET_IF_MASKED(&mask->key, keys, cnt,
 795			     FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS, enc_ipv4);
 796	FL_KEY_SET_IF_MASKED(&mask->key, keys, cnt,
 797			     FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS, enc_ipv6);
 798	if (FL_KEY_IS_MASKED(&mask->key, enc_ipv4) ||
 799	    FL_KEY_IS_MASKED(&mask->key, enc_ipv6))
 800		FL_KEY_SET(keys, cnt, FLOW_DISSECTOR_KEY_ENC_CONTROL,
 801			   enc_control);
 802	FL_KEY_SET_IF_MASKED(&mask->key, keys, cnt,
 803			     FLOW_DISSECTOR_KEY_ENC_PORTS, enc_tp);
 804
 805	skb_flow_dissector_init(&head->dissector, keys, cnt);
 806}
 807
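fl_init_dissector() tells the flow dissector which keys it must extract on the fast path. Control and basic are always present; every other key is included only if its portion of the mask is non-zero (FL_KEY_SET_IF_MASKED uses memchr_inv() for that check), and the encapsulation control key is added whenever either tunnel address family is masked. Dissection cost therefore scales with what the installed filters actually match on.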
 808static int fl_check_assign_mask(struct cls_fl_head *head,
 809				struct fl_flow_mask *mask)
 810{
 811	int err;
 812
 813	if (head->mask_assigned) {
 814		if (!fl_mask_eq(&head->mask, mask))
 815			return -EINVAL;
 816		else
 817			return 0;
 818	}
 819
  820	/* Mask is not assigned yet, so assign it and initialize the
  821	 * hashtable accordingly.
  822	 */
 823	err = fl_init_hashtable(head, mask);
 824	if (err)
 825		return err;
 826	memcpy(&head->mask, mask, sizeof(head->mask));
 827	head->mask_assigned = true;
 828
 829	fl_init_dissector(head, mask);
 830
 831	return 0;
 832}
 833
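In this version of the classifier a single mask is shared by every filter on the instance: the first filter's mask is copied into head->mask, used to size the hashtable and configure the dissector, and any later filter whose mask differs (fl_mask_eq() compares both the range and the masked bytes) is rejected with -EINVAL. Filters with different masks therefore have to live in separate flower instances, e.g. at different priorities.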
 834static int fl_set_parms(struct net *net, struct tcf_proto *tp,
 835			struct cls_fl_filter *f, struct fl_flow_mask *mask,
 836			unsigned long base, struct nlattr **tb,
 837			struct nlattr *est, bool ovr,
 838			struct netlink_ext_ack *extack)
 839{
 840	int err;
 841
 842	err = tcf_exts_validate(net, tp, tb, est, &f->exts, ovr, extack);
 843	if (err < 0)
 844		return err;
 845
 846	if (tb[TCA_FLOWER_CLASSID]) {
 847		f->res.classid = nla_get_u32(tb[TCA_FLOWER_CLASSID]);
 848		tcf_bind_filter(tp, &f->res, base);
 849	}
 850
 851	err = fl_set_key(net, tb, &f->key, &mask->key, extack);
 852	if (err)
 853		return err;
 854
 855	fl_mask_update_range(mask);
 856	fl_set_masked_key(&f->mkey, &f->key, mask);
 857
 858	return 0;
 859}
 860
 861static int fl_change(struct net *net, struct sk_buff *in_skb,
 862		     struct tcf_proto *tp, unsigned long base,
 863		     u32 handle, struct nlattr **tca,
 864		     void **arg, bool ovr, struct netlink_ext_ack *extack)
 865{
 866	struct cls_fl_head *head = rtnl_dereference(tp->root);
 867	struct cls_fl_filter *fold = *arg;
 868	struct cls_fl_filter *fnew;
 869	struct nlattr **tb;
 870	struct fl_flow_mask mask = {};
 871	int err;
 872
 873	if (!tca[TCA_OPTIONS])
 874		return -EINVAL;
 875
 876	tb = kcalloc(TCA_FLOWER_MAX + 1, sizeof(struct nlattr *), GFP_KERNEL);
 877	if (!tb)
 878		return -ENOBUFS;
 879
 880	err = nla_parse_nested(tb, TCA_FLOWER_MAX, tca[TCA_OPTIONS],
 881			       fl_policy, NULL);
 882	if (err < 0)
 883		goto errout_tb;
 884
 885	if (fold && handle && fold->handle != handle) {
 886		err = -EINVAL;
 887		goto errout_tb;
 888	}
 889
 890	fnew = kzalloc(sizeof(*fnew), GFP_KERNEL);
 891	if (!fnew) {
 892		err = -ENOBUFS;
 893		goto errout_tb;
 894	}
 895
 896	err = tcf_exts_init(&fnew->exts, TCA_FLOWER_ACT, 0);
 897	if (err < 0)
 898		goto errout;
 899
 900	if (!handle) {
 901		handle = 1;
 902		err = idr_alloc_u32(&head->handle_idr, fnew, &handle,
 903				    INT_MAX, GFP_KERNEL);
 904	} else if (!fold) {
 905		/* user specifies a handle and it doesn't exist */
 906		err = idr_alloc_u32(&head->handle_idr, fnew, &handle,
 907				    handle, GFP_KERNEL);
 908	}
 909	if (err)
 910		goto errout;
 911	fnew->handle = handle;
 912
 913	if (tb[TCA_FLOWER_FLAGS]) {
 914		fnew->flags = nla_get_u32(tb[TCA_FLOWER_FLAGS]);
 915
 916		if (!tc_flags_valid(fnew->flags)) {
 917			err = -EINVAL;
 918			goto errout_idr;
 919		}
 920	}
 921
 922	err = fl_set_parms(net, tp, fnew, &mask, base, tb, tca[TCA_RATE], ovr,
 923			   extack);
 924	if (err)
 925		goto errout_idr;
 926
 927	err = fl_check_assign_mask(head, &mask);
 928	if (err)
 929		goto errout_idr;
 930
 931	if (!tc_skip_sw(fnew->flags)) {
 932		if (!fold && fl_lookup(head, &fnew->mkey)) {
 933			err = -EEXIST;
 934			goto errout_idr;
 935		}
 936
 937		err = rhashtable_insert_fast(&head->ht, &fnew->ht_node,
 938					     head->ht_params);
 939		if (err)
 940			goto errout_idr;
 941	}
 942
 943	if (!tc_skip_hw(fnew->flags)) {
 944		err = fl_hw_replace_filter(tp,
 945					   &head->dissector,
 946					   &mask.key,
 947					   fnew,
 948					   extack);
 949		if (err)
 950			goto errout_idr;
 951	}
 952
 953	if (!tc_in_hw(fnew->flags))
 954		fnew->flags |= TCA_CLS_FLAGS_NOT_IN_HW;
 955
 956	if (fold) {
 957		if (!tc_skip_sw(fold->flags))
 958			rhashtable_remove_fast(&head->ht, &fold->ht_node,
 959					       head->ht_params);
 960		if (!tc_skip_hw(fold->flags))
 961			fl_hw_destroy_filter(tp, fold, NULL);
 962	}
 963
 964	*arg = fnew;
 965
 966	if (fold) {
 967		idr_replace(&head->handle_idr, fnew, fnew->handle);
 968		list_replace_rcu(&fold->list, &fnew->list);
 969		tcf_unbind_filter(tp, &fold->res);
 970		tcf_exts_get_net(&fold->exts);
 971		call_rcu(&fold->rcu, fl_destroy_filter);
 972	} else {
 973		list_add_tail_rcu(&fnew->list, &head->filters);
 974	}
 975
 976	kfree(tb);
 977	return 0;
 978
 979errout_idr:
 980	if (!fold)
 981		idr_remove(&head->handle_idr, fnew->handle);
 982errout:
 983	tcf_exts_destroy(&fnew->exts);
 984	kfree(fnew);
 985errout_tb:
 986	kfree(tb);
 987	return err;
 988}
 989
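fl_change() is the netlink entry point for adding or replacing a filter: it parses TCA_OPTIONS against fl_policy, allocates the new filter and a handle from the IDR, builds key and mask via fl_set_parms(), checks the mask against the instance-wide one, inserts the masked key into the rhashtable (unless skip_sw), offers the rule to hardware (unless skip_hw), and finally either replaces the old filter under RCU or appends the new one to the filter list. The error paths unwind in reverse order, removing the IDR entry only if the handle was allocated for this request.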
 990static int fl_delete(struct tcf_proto *tp, void *arg, bool *last,
 991		     struct netlink_ext_ack *extack)
 992{
 993	struct cls_fl_head *head = rtnl_dereference(tp->root);
 994	struct cls_fl_filter *f = arg;
 995
 996	if (!tc_skip_sw(f->flags))
 997		rhashtable_remove_fast(&head->ht, &f->ht_node,
 998				       head->ht_params);
 999	__fl_delete(tp, f, extack);
1000	*last = list_empty(&head->filters);
1001	return 0;
1002}
1003
1004static void fl_walk(struct tcf_proto *tp, struct tcf_walker *arg)
1005{
1006	struct cls_fl_head *head = rtnl_dereference(tp->root);
1007	struct cls_fl_filter *f;
1008
1009	list_for_each_entry_rcu(f, &head->filters, list) {
1010		if (arg->count < arg->skip)
1011			goto skip;
1012		if (arg->fn(tp, f, arg) < 0) {
1013			arg->stop = 1;
1014			break;
1015		}
1016skip:
1017		arg->count++;
1018	}
1019}
1020
1021static int fl_dump_key_val(struct sk_buff *skb,
1022			   void *val, int val_type,
1023			   void *mask, int mask_type, int len)
1024{
1025	int err;
1026
1027	if (!memchr_inv(mask, 0, len))
1028		return 0;
1029	err = nla_put(skb, val_type, len, val);
1030	if (err)
1031		return err;
1032	if (mask_type != TCA_FLOWER_UNSPEC) {
1033		err = nla_put(skb, mask_type, len, mask);
1034		if (err)
1035			return err;
1036	}
1037	return 0;
1038}
1039
1040static int fl_dump_key_mpls(struct sk_buff *skb,
1041			    struct flow_dissector_key_mpls *mpls_key,
1042			    struct flow_dissector_key_mpls *mpls_mask)
1043{
1044	int err;
1045
1046	if (!memchr_inv(mpls_mask, 0, sizeof(*mpls_mask)))
1047		return 0;
1048	if (mpls_mask->mpls_ttl) {
1049		err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_TTL,
1050				 mpls_key->mpls_ttl);
1051		if (err)
1052			return err;
1053	}
1054	if (mpls_mask->mpls_tc) {
1055		err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_TC,
1056				 mpls_key->mpls_tc);
1057		if (err)
1058			return err;
1059	}
1060	if (mpls_mask->mpls_label) {
1061		err = nla_put_u32(skb, TCA_FLOWER_KEY_MPLS_LABEL,
1062				  mpls_key->mpls_label);
1063		if (err)
1064			return err;
1065	}
1066	if (mpls_mask->mpls_bos) {
1067		err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_BOS,
1068				 mpls_key->mpls_bos);
1069		if (err)
1070			return err;
1071	}
1072	return 0;
1073}
1074
1075static int fl_dump_key_ip(struct sk_buff *skb,
1076			  struct flow_dissector_key_ip *key,
1077			  struct flow_dissector_key_ip *mask)
1078{
1079	if (fl_dump_key_val(skb, &key->tos, TCA_FLOWER_KEY_IP_TOS, &mask->tos,
1080			    TCA_FLOWER_KEY_IP_TOS_MASK, sizeof(key->tos)) ||
1081	    fl_dump_key_val(skb, &key->ttl, TCA_FLOWER_KEY_IP_TTL, &mask->ttl,
1082			    TCA_FLOWER_KEY_IP_TTL_MASK, sizeof(key->ttl)))
1083		return -1;
1084
1085	return 0;
1086}
1087
1088static int fl_dump_key_vlan(struct sk_buff *skb,
1089			    struct flow_dissector_key_vlan *vlan_key,
1090			    struct flow_dissector_key_vlan *vlan_mask)
1091{
1092	int err;
1093
1094	if (!memchr_inv(vlan_mask, 0, sizeof(*vlan_mask)))
1095		return 0;
1096	if (vlan_mask->vlan_id) {
1097		err = nla_put_u16(skb, TCA_FLOWER_KEY_VLAN_ID,
1098				  vlan_key->vlan_id);
1099		if (err)
1100			return err;
1101	}
1102	if (vlan_mask->vlan_priority) {
1103		err = nla_put_u8(skb, TCA_FLOWER_KEY_VLAN_PRIO,
1104				 vlan_key->vlan_priority);
1105		if (err)
1106			return err;
1107	}
1108	return 0;
1109}
1110
1111static void fl_get_key_flag(u32 dissector_key, u32 dissector_mask,
1112			    u32 *flower_key, u32 *flower_mask,
1113			    u32 flower_flag_bit, u32 dissector_flag_bit)
1114{
1115	if (dissector_mask & dissector_flag_bit) {
1116		*flower_mask |= flower_flag_bit;
1117		if (dissector_key & dissector_flag_bit)
1118			*flower_key |= flower_flag_bit;
1119	}
1120}
1121
1122static int fl_dump_key_flags(struct sk_buff *skb, u32 flags_key, u32 flags_mask)
1123{
1124	u32 key, mask;
1125	__be32 _key, _mask;
1126	int err;
1127
1128	if (!memchr_inv(&flags_mask, 0, sizeof(flags_mask)))
1129		return 0;
1130
1131	key = 0;
1132	mask = 0;
1133
1134	fl_get_key_flag(flags_key, flags_mask, &key, &mask,
1135			TCA_FLOWER_KEY_FLAGS_IS_FRAGMENT, FLOW_DIS_IS_FRAGMENT);
1136	fl_get_key_flag(flags_key, flags_mask, &key, &mask,
1137			TCA_FLOWER_KEY_FLAGS_FRAG_IS_FIRST,
1138			FLOW_DIS_FIRST_FRAG);
1139
1140	_key = cpu_to_be32(key);
1141	_mask = cpu_to_be32(mask);
1142
1143	err = nla_put(skb, TCA_FLOWER_KEY_FLAGS, 4, &_key);
1144	if (err)
1145		return err;
1146
1147	return nla_put(skb, TCA_FLOWER_KEY_FLAGS_MASK, 4, &_mask);
1148}
1149
1150static int fl_dump(struct net *net, struct tcf_proto *tp, void *fh,
1151		   struct sk_buff *skb, struct tcmsg *t)
1152{
1153	struct cls_fl_head *head = rtnl_dereference(tp->root);
1154	struct cls_fl_filter *f = fh;
1155	struct nlattr *nest;
1156	struct fl_flow_key *key, *mask;
1157
1158	if (!f)
1159		return skb->len;
1160
1161	t->tcm_handle = f->handle;
1162
1163	nest = nla_nest_start(skb, TCA_OPTIONS);
1164	if (!nest)
1165		goto nla_put_failure;
1166
1167	if (f->res.classid &&
1168	    nla_put_u32(skb, TCA_FLOWER_CLASSID, f->res.classid))
1169		goto nla_put_failure;
1170
1171	key = &f->key;
1172	mask = &head->mask.key;
1173
1174	if (mask->indev_ifindex) {
1175		struct net_device *dev;
1176
1177		dev = __dev_get_by_index(net, key->indev_ifindex);
1178		if (dev && nla_put_string(skb, TCA_FLOWER_INDEV, dev->name))
1179			goto nla_put_failure;
1180	}
1181
1182	if (!tc_skip_hw(f->flags))
1183		fl_hw_update_stats(tp, f);
1184
1185	if (fl_dump_key_val(skb, key->eth.dst, TCA_FLOWER_KEY_ETH_DST,
1186			    mask->eth.dst, TCA_FLOWER_KEY_ETH_DST_MASK,
1187			    sizeof(key->eth.dst)) ||
1188	    fl_dump_key_val(skb, key->eth.src, TCA_FLOWER_KEY_ETH_SRC,
1189			    mask->eth.src, TCA_FLOWER_KEY_ETH_SRC_MASK,
1190			    sizeof(key->eth.src)) ||
1191	    fl_dump_key_val(skb, &key->basic.n_proto, TCA_FLOWER_KEY_ETH_TYPE,
1192			    &mask->basic.n_proto, TCA_FLOWER_UNSPEC,
1193			    sizeof(key->basic.n_proto)))
1194		goto nla_put_failure;
1195
1196	if (fl_dump_key_mpls(skb, &key->mpls, &mask->mpls))
1197		goto nla_put_failure;
1198
1199	if (fl_dump_key_vlan(skb, &key->vlan, &mask->vlan))
1200		goto nla_put_failure;
1201
1202	if ((key->basic.n_proto == htons(ETH_P_IP) ||
1203	     key->basic.n_proto == htons(ETH_P_IPV6)) &&
1204	    (fl_dump_key_val(skb, &key->basic.ip_proto, TCA_FLOWER_KEY_IP_PROTO,
1205			    &mask->basic.ip_proto, TCA_FLOWER_UNSPEC,
1206			    sizeof(key->basic.ip_proto)) ||
1207	    fl_dump_key_ip(skb, &key->ip, &mask->ip)))
1208		goto nla_put_failure;
1209
1210	if (key->control.addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS &&
1211	    (fl_dump_key_val(skb, &key->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC,
1212			     &mask->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC_MASK,
1213			     sizeof(key->ipv4.src)) ||
1214	     fl_dump_key_val(skb, &key->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST,
1215			     &mask->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST_MASK,
1216			     sizeof(key->ipv4.dst))))
1217		goto nla_put_failure;
1218	else if (key->control.addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS &&
1219		 (fl_dump_key_val(skb, &key->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC,
1220				  &mask->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC_MASK,
1221				  sizeof(key->ipv6.src)) ||
1222		  fl_dump_key_val(skb, &key->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST,
1223				  &mask->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST_MASK,
1224				  sizeof(key->ipv6.dst))))
1225		goto nla_put_failure;
1226
1227	if (key->basic.ip_proto == IPPROTO_TCP &&
1228	    (fl_dump_key_val(skb, &key->tp.src, TCA_FLOWER_KEY_TCP_SRC,
1229			     &mask->tp.src, TCA_FLOWER_KEY_TCP_SRC_MASK,
1230			     sizeof(key->tp.src)) ||
1231	     fl_dump_key_val(skb, &key->tp.dst, TCA_FLOWER_KEY_TCP_DST,
1232			     &mask->tp.dst, TCA_FLOWER_KEY_TCP_DST_MASK,
1233			     sizeof(key->tp.dst)) ||
1234	     fl_dump_key_val(skb, &key->tcp.flags, TCA_FLOWER_KEY_TCP_FLAGS,
1235			     &mask->tcp.flags, TCA_FLOWER_KEY_TCP_FLAGS_MASK,
1236			     sizeof(key->tcp.flags))))
1237		goto nla_put_failure;
1238	else if (key->basic.ip_proto == IPPROTO_UDP &&
1239		 (fl_dump_key_val(skb, &key->tp.src, TCA_FLOWER_KEY_UDP_SRC,
1240				  &mask->tp.src, TCA_FLOWER_KEY_UDP_SRC_MASK,
1241				  sizeof(key->tp.src)) ||
1242		  fl_dump_key_val(skb, &key->tp.dst, TCA_FLOWER_KEY_UDP_DST,
1243				  &mask->tp.dst, TCA_FLOWER_KEY_UDP_DST_MASK,
1244				  sizeof(key->tp.dst))))
1245		goto nla_put_failure;
1246	else if (key->basic.ip_proto == IPPROTO_SCTP &&
1247		 (fl_dump_key_val(skb, &key->tp.src, TCA_FLOWER_KEY_SCTP_SRC,
1248				  &mask->tp.src, TCA_FLOWER_KEY_SCTP_SRC_MASK,
1249				  sizeof(key->tp.src)) ||
1250		  fl_dump_key_val(skb, &key->tp.dst, TCA_FLOWER_KEY_SCTP_DST,
1251				  &mask->tp.dst, TCA_FLOWER_KEY_SCTP_DST_MASK,
1252				  sizeof(key->tp.dst))))
1253		goto nla_put_failure;
1254	else if (key->basic.n_proto == htons(ETH_P_IP) &&
1255		 key->basic.ip_proto == IPPROTO_ICMP &&
1256		 (fl_dump_key_val(skb, &key->icmp.type,
1257				  TCA_FLOWER_KEY_ICMPV4_TYPE, &mask->icmp.type,
1258				  TCA_FLOWER_KEY_ICMPV4_TYPE_MASK,
1259				  sizeof(key->icmp.type)) ||
1260		  fl_dump_key_val(skb, &key->icmp.code,
1261				  TCA_FLOWER_KEY_ICMPV4_CODE, &mask->icmp.code,
1262				  TCA_FLOWER_KEY_ICMPV4_CODE_MASK,
1263				  sizeof(key->icmp.code))))
1264		goto nla_put_failure;
1265	else if (key->basic.n_proto == htons(ETH_P_IPV6) &&
1266		 key->basic.ip_proto == IPPROTO_ICMPV6 &&
1267		 (fl_dump_key_val(skb, &key->icmp.type,
1268				  TCA_FLOWER_KEY_ICMPV6_TYPE, &mask->icmp.type,
1269				  TCA_FLOWER_KEY_ICMPV6_TYPE_MASK,
1270				  sizeof(key->icmp.type)) ||
1271		  fl_dump_key_val(skb, &key->icmp.code,
1272				  TCA_FLOWER_KEY_ICMPV6_CODE, &mask->icmp.code,
1273				  TCA_FLOWER_KEY_ICMPV6_CODE_MASK,
1274				  sizeof(key->icmp.code))))
1275		goto nla_put_failure;
1276	else if ((key->basic.n_proto == htons(ETH_P_ARP) ||
1277		  key->basic.n_proto == htons(ETH_P_RARP)) &&
1278		 (fl_dump_key_val(skb, &key->arp.sip,
1279				  TCA_FLOWER_KEY_ARP_SIP, &mask->arp.sip,
1280				  TCA_FLOWER_KEY_ARP_SIP_MASK,
1281				  sizeof(key->arp.sip)) ||
1282		  fl_dump_key_val(skb, &key->arp.tip,
1283				  TCA_FLOWER_KEY_ARP_TIP, &mask->arp.tip,
1284				  TCA_FLOWER_KEY_ARP_TIP_MASK,
1285				  sizeof(key->arp.tip)) ||
1286		  fl_dump_key_val(skb, &key->arp.op,
1287				  TCA_FLOWER_KEY_ARP_OP, &mask->arp.op,
1288				  TCA_FLOWER_KEY_ARP_OP_MASK,
1289				  sizeof(key->arp.op)) ||
1290		  fl_dump_key_val(skb, key->arp.sha, TCA_FLOWER_KEY_ARP_SHA,
1291				  mask->arp.sha, TCA_FLOWER_KEY_ARP_SHA_MASK,
1292				  sizeof(key->arp.sha)) ||
1293		  fl_dump_key_val(skb, key->arp.tha, TCA_FLOWER_KEY_ARP_THA,
1294				  mask->arp.tha, TCA_FLOWER_KEY_ARP_THA_MASK,
1295				  sizeof(key->arp.tha))))
1296		goto nla_put_failure;
1297
1298	if (key->enc_control.addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS &&
1299	    (fl_dump_key_val(skb, &key->enc_ipv4.src,
1300			    TCA_FLOWER_KEY_ENC_IPV4_SRC, &mask->enc_ipv4.src,
1301			    TCA_FLOWER_KEY_ENC_IPV4_SRC_MASK,
1302			    sizeof(key->enc_ipv4.src)) ||
1303	     fl_dump_key_val(skb, &key->enc_ipv4.dst,
1304			     TCA_FLOWER_KEY_ENC_IPV4_DST, &mask->enc_ipv4.dst,
1305			     TCA_FLOWER_KEY_ENC_IPV4_DST_MASK,
1306			     sizeof(key->enc_ipv4.dst))))
1307		goto nla_put_failure;
1308	else if (key->enc_control.addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS &&
1309		 (fl_dump_key_val(skb, &key->enc_ipv6.src,
1310			    TCA_FLOWER_KEY_ENC_IPV6_SRC, &mask->enc_ipv6.src,
1311			    TCA_FLOWER_KEY_ENC_IPV6_SRC_MASK,
1312			    sizeof(key->enc_ipv6.src)) ||
1313		 fl_dump_key_val(skb, &key->enc_ipv6.dst,
1314				 TCA_FLOWER_KEY_ENC_IPV6_DST,
1315				 &mask->enc_ipv6.dst,
1316				 TCA_FLOWER_KEY_ENC_IPV6_DST_MASK,
1317			    sizeof(key->enc_ipv6.dst))))
1318		goto nla_put_failure;
1319
1320	if (fl_dump_key_val(skb, &key->enc_key_id, TCA_FLOWER_KEY_ENC_KEY_ID,
1321			    &mask->enc_key_id, TCA_FLOWER_UNSPEC,
1322			    sizeof(key->enc_key_id)) ||
1323	    fl_dump_key_val(skb, &key->enc_tp.src,
1324			    TCA_FLOWER_KEY_ENC_UDP_SRC_PORT,
1325			    &mask->enc_tp.src,
1326			    TCA_FLOWER_KEY_ENC_UDP_SRC_PORT_MASK,
1327			    sizeof(key->enc_tp.src)) ||
1328	    fl_dump_key_val(skb, &key->enc_tp.dst,
1329			    TCA_FLOWER_KEY_ENC_UDP_DST_PORT,
1330			    &mask->enc_tp.dst,
1331			    TCA_FLOWER_KEY_ENC_UDP_DST_PORT_MASK,
1332			    sizeof(key->enc_tp.dst)))
1333		goto nla_put_failure;
1334
1335	if (fl_dump_key_flags(skb, key->control.flags, mask->control.flags))
1336		goto nla_put_failure;
1337
1338	if (f->flags && nla_put_u32(skb, TCA_FLOWER_FLAGS, f->flags))
1339		goto nla_put_failure;
1340
1341	if (tcf_exts_dump(skb, &f->exts))
1342		goto nla_put_failure;
1343
1344	nla_nest_end(skb, nest);
1345
1346	if (tcf_exts_dump_stats(skb, &f->exts) < 0)
1347		goto nla_put_failure;
1348
1349	return skb->len;
1350
1351nla_put_failure:
1352	nla_nest_cancel(skb, nest);
1353	return -1;
1354}
1355
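fl_dump() is the mirror image of fl_set_key(): hardware statistics are refreshed first (unless skip_hw), then for each field the value and mask attributes are emitted only when the stored mask is non-zero (fl_dump_key_val() returns early otherwise), so a dump reproduces exactly the attributes needed to recreate the filter, together with the classid, flags, actions and their statistics.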
1356static void fl_bind_class(void *fh, u32 classid, unsigned long cl)
1357{
1358	struct cls_fl_filter *f = fh;
1359
1360	if (f && f->res.classid == classid)
1361		f->res.class = cl;
1362}
1363
1364static struct tcf_proto_ops cls_fl_ops __read_mostly = {
1365	.kind		= "flower",
1366	.classify	= fl_classify,
1367	.init		= fl_init,
1368	.destroy	= fl_destroy,
1369	.get		= fl_get,
1370	.change		= fl_change,
1371	.delete		= fl_delete,
1372	.walk		= fl_walk,
1373	.dump		= fl_dump,
1374	.bind_class	= fl_bind_class,
1375	.owner		= THIS_MODULE,
1376};
1377
1378static int __init cls_fl_init(void)
1379{
1380	return register_tcf_proto_ops(&cls_fl_ops);
1381}
1382
1383static void __exit cls_fl_exit(void)
1384{
1385	unregister_tcf_proto_ops(&cls_fl_ops);
1386}
1387
1388module_init(cls_fl_init);
1389module_exit(cls_fl_exit);
1390
1391MODULE_AUTHOR("Jiri Pirko <jiri@resnulli.us>");
1392MODULE_DESCRIPTION("Flower classifier");
1393MODULE_LICENSE("GPL v2");