// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* -
 * net/sched/act_ct.c  Connection Tracking action
 *
 * Authors:   Paul Blakey <paulb@mellanox.com>
 *            Yossi Kuperman <yossiku@mellanox.com>
 *            Marcelo Ricardo Leitner <marcelo.leitner@gmail.com>
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/pkt_cls.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/rhashtable.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>
#include <net/act_api.h>
#include <net/ip.h>
#include <net/ipv6_frag.h>
#include <uapi/linux/tc_act/tc_ct.h>
#include <net/tc_act/tc_ct.h>
#include <net/tc_wrapper.h>

#include <net/netfilter/nf_flow_table.h>
#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_core.h>
#include <net/netfilter/nf_conntrack_zones.h>
#include <net/netfilter/nf_conntrack_helper.h>
#include <net/netfilter/nf_conntrack_acct.h>
#include <net/netfilter/ipv6/nf_defrag_ipv6.h>
#include <net/netfilter/nf_conntrack_act_ct.h>
#include <net/netfilter/nf_conntrack_seqadj.h>
#include <uapi/linux/netfilter/nf_nat.h>

static struct workqueue_struct *act_ct_wq;
static struct rhashtable zones_ht;
static DEFINE_MUTEX(zones_mutex);

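/* Per-zone flow table, shared by all ct action instances in the same
 * conntrack zone. Entries live in zones_ht keyed by zone id and are
 * refcounted so that actions reuse a single nf_flowtable per zone.
 */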
struct tcf_ct_flow_table {
	struct rhash_head node; /* In zones tables */

	struct rcu_work rwork;
	struct nf_flowtable nf_ft;
	refcount_t ref;
	u16 zone;

	bool dying;
};

static const struct rhashtable_params zones_params = {
	.head_offset = offsetof(struct tcf_ct_flow_table, node),
	.key_offset = offsetof(struct tcf_ct_flow_table, zone),
	.key_len = sizeof_field(struct tcf_ct_flow_table, zone),
	.automatic_shrinking = true,
};

static struct flow_action_entry *
tcf_ct_flow_table_flow_action_get_next(struct flow_action *flow_action)
{
	int i = flow_action->num_entries++;

	return &flow_action->entries[i];
}

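/* Append a FLOW_ACTION_MANGLE entry. Note the mask is stored inverted:
 * callers pass the bits they want rewritten, while in the flow_action
 * entry a set mask bit means "preserve the original bit".
 */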
static void tcf_ct_add_mangle_action(struct flow_action *action,
				     enum flow_action_mangle_base htype,
				     u32 offset,
				     u32 mask,
				     u32 val)
{
	struct flow_action_entry *entry;

	entry = tcf_ct_flow_table_flow_action_get_next(action);
	entry->id = FLOW_ACTION_MANGLE;
	entry->mangle.htype = htype;
	entry->mangle.mask = ~mask;
	entry->mangle.offset = offset;
	entry->mangle.val = val;
}

/* The following NAT helper functions check if the inverted reverse tuple
 * (target) differs from the current direction's tuple - meaning NAT for
 * ports and/or IP addresses is needed - and add the relevant mangle actions.
 */
static void
tcf_ct_flow_table_add_action_nat_ipv4(const struct nf_conntrack_tuple *tuple,
				      struct nf_conntrack_tuple target,
				      struct flow_action *action)
{
	if (memcmp(&target.src.u3, &tuple->src.u3, sizeof(target.src.u3)))
		tcf_ct_add_mangle_action(action, FLOW_ACT_MANGLE_HDR_TYPE_IP4,
					 offsetof(struct iphdr, saddr),
					 0xFFFFFFFF,
					 be32_to_cpu(target.src.u3.ip));
	if (memcmp(&target.dst.u3, &tuple->dst.u3, sizeof(target.dst.u3)))
		tcf_ct_add_mangle_action(action, FLOW_ACT_MANGLE_HDR_TYPE_IP4,
					 offsetof(struct iphdr, daddr),
					 0xFFFFFFFF,
					 be32_to_cpu(target.dst.u3.ip));
}

static void
tcf_ct_add_ipv6_addr_mangle_action(struct flow_action *action,
				   union nf_inet_addr *addr,
				   u32 offset)
{
	int i;

	for (i = 0; i < sizeof(struct in6_addr) / sizeof(u32); i++)
		tcf_ct_add_mangle_action(action, FLOW_ACT_MANGLE_HDR_TYPE_IP6,
					 i * sizeof(u32) + offset,
					 0xFFFFFFFF, be32_to_cpu(addr->ip6[i]));
}

static void
tcf_ct_flow_table_add_action_nat_ipv6(const struct nf_conntrack_tuple *tuple,
				      struct nf_conntrack_tuple target,
				      struct flow_action *action)
{
	if (memcmp(&target.src.u3, &tuple->src.u3, sizeof(target.src.u3)))
		tcf_ct_add_ipv6_addr_mangle_action(action, &target.src.u3,
						   offsetof(struct ipv6hdr,
							    saddr));
	if (memcmp(&target.dst.u3, &tuple->dst.u3, sizeof(target.dst.u3)))
		tcf_ct_add_ipv6_addr_mangle_action(action, &target.dst.u3,
						   offsetof(struct ipv6hdr,
							    daddr));
}

static void
tcf_ct_flow_table_add_action_nat_tcp(const struct nf_conntrack_tuple *tuple,
				     struct nf_conntrack_tuple target,
				     struct flow_action *action)
{
	__be16 target_src = target.src.u.tcp.port;
	__be16 target_dst = target.dst.u.tcp.port;

	if (target_src != tuple->src.u.tcp.port)
		tcf_ct_add_mangle_action(action, FLOW_ACT_MANGLE_HDR_TYPE_TCP,
					 offsetof(struct tcphdr, source),
					 0xFFFF, be16_to_cpu(target_src));
	if (target_dst != tuple->dst.u.tcp.port)
		tcf_ct_add_mangle_action(action, FLOW_ACT_MANGLE_HDR_TYPE_TCP,
					 offsetof(struct tcphdr, dest),
					 0xFFFF, be16_to_cpu(target_dst));
}

static void
tcf_ct_flow_table_add_action_nat_udp(const struct nf_conntrack_tuple *tuple,
				     struct nf_conntrack_tuple target,
				     struct flow_action *action)
{
	__be16 target_src = target.src.u.udp.port;
	__be16 target_dst = target.dst.u.udp.port;

	if (target_src != tuple->src.u.udp.port)
		tcf_ct_add_mangle_action(action, FLOW_ACT_MANGLE_HDR_TYPE_UDP,
					 offsetof(struct udphdr, source),
					 0xFFFF, be16_to_cpu(target_src));
	if (target_dst != tuple->dst.u.udp.port)
		tcf_ct_add_mangle_action(action, FLOW_ACT_MANGLE_HDR_TYPE_UDP,
					 offsetof(struct udphdr, dest),
					 0xFFFF, be16_to_cpu(target_dst));
}

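/* Append a FLOW_ACTION_CT_METADATA entry carrying the conntrack mark,
 * labels, direction and a cookie that encodes the ct pointer and ctinfo
 * the same way nf_ct_set() stores them in skb->_nfct.
 */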
static void tcf_ct_flow_table_add_action_meta(struct nf_conn *ct,
					      enum ip_conntrack_dir dir,
					      enum ip_conntrack_info ctinfo,
					      struct flow_action *action)
{
	struct nf_conn_labels *ct_labels;
	struct flow_action_entry *entry;
	u32 *act_ct_labels;

	entry = tcf_ct_flow_table_flow_action_get_next(action);
	entry->id = FLOW_ACTION_CT_METADATA;
#if IS_ENABLED(CONFIG_NF_CONNTRACK_MARK)
	entry->ct_metadata.mark = READ_ONCE(ct->mark);
#endif
	/* aligns with the CT reference on the SKB nf_ct_set */
	entry->ct_metadata.cookie = (unsigned long)ct | ctinfo;
	entry->ct_metadata.orig_dir = dir == IP_CT_DIR_ORIGINAL;

	act_ct_labels = entry->ct_metadata.labels;
	ct_labels = nf_ct_labels_find(ct);
	if (ct_labels)
		memcpy(act_ct_labels, ct_labels->bits, NF_CT_LABELS_MAX_SIZE);
	else
		memset(act_ct_labels, 0, NF_CT_LABELS_MAX_SIZE);
}

static int tcf_ct_flow_table_add_action_nat(struct net *net,
					    struct nf_conn *ct,
					    enum ip_conntrack_dir dir,
					    struct flow_action *action)
{
	const struct nf_conntrack_tuple *tuple = &ct->tuplehash[dir].tuple;
	struct nf_conntrack_tuple target;

	if (!(ct->status & IPS_NAT_MASK))
		return 0;

	nf_ct_invert_tuple(&target, &ct->tuplehash[!dir].tuple);

	switch (tuple->src.l3num) {
	case NFPROTO_IPV4:
		tcf_ct_flow_table_add_action_nat_ipv4(tuple, target,
						      action);
		break;
	case NFPROTO_IPV6:
		tcf_ct_flow_table_add_action_nat_ipv6(tuple, target,
						      action);
		break;
	default:
		return -EOPNOTSUPP;
	}

	switch (nf_ct_protonum(ct)) {
	case IPPROTO_TCP:
		tcf_ct_flow_table_add_action_nat_tcp(tuple, target, action);
		break;
	case IPPROTO_UDP:
		tcf_ct_flow_table_add_action_nat_udp(tuple, target, action);
		break;
	default:
		return -EOPNOTSUPP;
	}

	return 0;
}

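/* Flowtable ->action() callback: translate one direction of a flow into
 * NAT mangle actions plus a CT metadata action for hw offload. On error,
 * any entries added here are cleared so the rule is left untouched.
 */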
static int tcf_ct_flow_table_fill_actions(struct net *net,
					  struct flow_offload *flow,
					  enum flow_offload_tuple_dir tdir,
					  struct nf_flow_rule *flow_rule)
{
	struct flow_action *action = &flow_rule->rule->action;
	int num_entries = action->num_entries;
	struct nf_conn *ct = flow->ct;
	enum ip_conntrack_info ctinfo;
	enum ip_conntrack_dir dir;
	int i, err;

	switch (tdir) {
	case FLOW_OFFLOAD_DIR_ORIGINAL:
		dir = IP_CT_DIR_ORIGINAL;
		ctinfo = test_bit(IPS_SEEN_REPLY_BIT, &ct->status) ?
			IP_CT_ESTABLISHED : IP_CT_NEW;
		if (ctinfo == IP_CT_ESTABLISHED)
			set_bit(NF_FLOW_HW_ESTABLISHED, &flow->flags);
		break;
	case FLOW_OFFLOAD_DIR_REPLY:
		dir = IP_CT_DIR_REPLY;
		ctinfo = IP_CT_ESTABLISHED_REPLY;
		break;
	default:
		return -EOPNOTSUPP;
	}

	err = tcf_ct_flow_table_add_action_nat(net, ct, dir, action);
	if (err)
		goto err_nat;

	tcf_ct_flow_table_add_action_meta(ct, dir, ctinfo, action);
	return 0;

err_nat:
	/* Clear filled actions */
	for (i = num_entries; i < action->num_entries; i++)
		memset(&action->entries[i], 0, sizeof(action->entries[i]));
	action->num_entries = num_entries;

	return err;
}

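/* Flowtable ->gc() callback: consider a hw-offloaded flow outdated once
 * the connection has seen a reply but the offloaded rule was never
 * refreshed to the established state, so it gets torn down and the
 * reply direction goes back through software.
 */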
static bool tcf_ct_flow_is_outdated(const struct flow_offload *flow)
{
	return test_bit(IPS_SEEN_REPLY_BIT, &flow->ct->status) &&
	       test_bit(IPS_HW_OFFLOAD_BIT, &flow->ct->status) &&
	       !test_bit(NF_FLOW_HW_PENDING, &flow->flags) &&
	       !test_bit(NF_FLOW_HW_ESTABLISHED, &flow->flags);
}

static void tcf_ct_flow_table_get_ref(struct tcf_ct_flow_table *ct_ft);

static void tcf_ct_nf_get(struct nf_flowtable *ft)
{
	struct tcf_ct_flow_table *ct_ft =
		container_of(ft, struct tcf_ct_flow_table, nf_ft);

	tcf_ct_flow_table_get_ref(ct_ft);
}

static void tcf_ct_flow_table_put(struct tcf_ct_flow_table *ct_ft);

static void tcf_ct_nf_put(struct nf_flowtable *ft)
{
	struct tcf_ct_flow_table *ct_ft =
		container_of(ft, struct tcf_ct_flow_table, nf_ft);

	tcf_ct_flow_table_put(ct_ft);
}

static struct nf_flowtable_type flowtable_ct = {
	.gc		= tcf_ct_flow_is_outdated,
	.action		= tcf_ct_flow_table_fill_actions,
	.get		= tcf_ct_nf_get,
	.put		= tcf_ct_nf_put,
	.owner		= THIS_MODULE,
};

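/* Get-or-create the per-zone flow table under zones_mutex: reuse an
 * existing table if a reference can still be taken, otherwise allocate,
 * insert into zones_ht and init the nf_flowtable with hw offload and
 * counter support.
 */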
static int tcf_ct_flow_table_get(struct net *net, struct tcf_ct_params *params)
{
	struct tcf_ct_flow_table *ct_ft;
	int err = -ENOMEM;

	mutex_lock(&zones_mutex);
	ct_ft = rhashtable_lookup_fast(&zones_ht, &params->zone, zones_params);
	if (ct_ft && refcount_inc_not_zero(&ct_ft->ref))
		goto out_unlock;

	ct_ft = kzalloc(sizeof(*ct_ft), GFP_KERNEL);
	if (!ct_ft)
		goto err_alloc;
	refcount_set(&ct_ft->ref, 1);

	ct_ft->zone = params->zone;
	err = rhashtable_insert_fast(&zones_ht, &ct_ft->node, zones_params);
	if (err)
		goto err_insert;

	ct_ft->nf_ft.type = &flowtable_ct;
	ct_ft->nf_ft.flags |= NF_FLOWTABLE_HW_OFFLOAD |
			      NF_FLOWTABLE_COUNTER;
	err = nf_flow_table_init(&ct_ft->nf_ft);
	if (err)
		goto err_init;
	write_pnet(&ct_ft->nf_ft.net, net);

	__module_get(THIS_MODULE);
out_unlock:
	params->ct_ft = ct_ft;
	params->nf_ft = &ct_ft->nf_ft;
	mutex_unlock(&zones_mutex);

	return 0;

err_init:
	rhashtable_remove_fast(&zones_ht, &ct_ft->node, zones_params);
err_insert:
	kfree(ct_ft);
err_alloc:
	mutex_unlock(&zones_mutex);
	return err;
}

static void tcf_ct_flow_table_get_ref(struct tcf_ct_flow_table *ct_ft)
{
	refcount_inc(&ct_ft->ref);
}

static void tcf_ct_flow_table_cleanup_work(struct work_struct *work)
{
	struct tcf_ct_flow_table *ct_ft;
	struct flow_block *block;

	ct_ft = container_of(to_rcu_work(work), struct tcf_ct_flow_table,
			     rwork);
	nf_flow_table_free(&ct_ft->nf_ft);

	block = &ct_ft->nf_ft.flow_block;
	down_write(&ct_ft->nf_ft.flow_block_lock);
	WARN_ON(!list_empty(&block->cb_list));
	up_write(&ct_ft->nf_ft.flow_block_lock);
	kfree(ct_ft);

	module_put(THIS_MODULE);
}

static void tcf_ct_flow_table_put(struct tcf_ct_flow_table *ct_ft)
{
	if (refcount_dec_and_test(&ct_ft->ref)) {
		rhashtable_remove_fast(&zones_ht, &ct_ft->node, zones_params);
		INIT_RCU_WORK(&ct_ft->rwork, tcf_ct_flow_table_cleanup_work);
		queue_rcu_work(act_ct_wq, &ct_ft->rwork);
	}
}

static void tcf_ct_flow_tc_ifidx(struct flow_offload *entry,
				 struct nf_conn_act_ct_ext *act_ct_ext, u8 dir)
{
	entry->tuplehash[dir].tuple.xmit_type = FLOW_OFFLOAD_XMIT_TC;
	entry->tuplehash[dir].tuple.tc.iifidx = act_ct_ext->ifindex[dir];
}

static void tcf_ct_flow_ct_ext_ifidx_update(struct flow_offload *entry)
{
	struct nf_conn_act_ct_ext *act_ct_ext;

	act_ct_ext = nf_conn_act_ct_ext_find(entry->ct);
	if (act_ct_ext) {
		tcf_ct_flow_tc_ifidx(entry, act_ct_ext, FLOW_OFFLOAD_DIR_ORIGINAL);
		tcf_ct_flow_tc_ifidx(entry, act_ct_ext, FLOW_OFFLOAD_DIR_REPLY);
	}
}

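/* Offload a connection into the zone's flow table. IPS_OFFLOAD_BIT
 * guards against adding the same conntrack entry twice, and TCP window
 * tracking is made liberal since packets taking the fastpath are no
 * longer tracked in software.
 */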
static void tcf_ct_flow_table_add(struct tcf_ct_flow_table *ct_ft,
				  struct nf_conn *ct,
				  bool tcp, bool bidirectional)
{
	struct nf_conn_act_ct_ext *act_ct_ext;
	struct flow_offload *entry;
	int err;

	if (test_and_set_bit(IPS_OFFLOAD_BIT, &ct->status))
		return;

	entry = flow_offload_alloc(ct);
	if (!entry) {
		WARN_ON_ONCE(1);
		goto err_alloc;
	}

	if (tcp) {
		ct->proto.tcp.seen[0].flags |= IP_CT_TCP_FLAG_BE_LIBERAL;
		ct->proto.tcp.seen[1].flags |= IP_CT_TCP_FLAG_BE_LIBERAL;
	}
	if (bidirectional)
		__set_bit(NF_FLOW_HW_BIDIRECTIONAL, &entry->flags);

	act_ct_ext = nf_conn_act_ct_ext_find(ct);
	if (act_ct_ext) {
		tcf_ct_flow_tc_ifidx(entry, act_ct_ext, FLOW_OFFLOAD_DIR_ORIGINAL);
		tcf_ct_flow_tc_ifidx(entry, act_ct_ext, FLOW_OFFLOAD_DIR_REPLY);
	}

	err = flow_offload_add(&ct_ft->nf_ft, entry);
	if (err)
		goto err_add;

	return;

err_add:
	flow_offload_free(entry);
err_alloc:
	clear_bit(IPS_OFFLOAD_BIT, &ct->status);
}

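/* Decide whether a connection is ready to be offloaded: established and
 * assured TCP, confirmed UDP (unidirectional until assured), or keyless
 * GREv0 without NAT. Connections using a helper or seqadj stay in
 * software.
 */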
static void tcf_ct_flow_table_process_conn(struct tcf_ct_flow_table *ct_ft,
					   struct nf_conn *ct,
					   enum ip_conntrack_info ctinfo)
{
	bool tcp = false, bidirectional = true;

	switch (nf_ct_protonum(ct)) {
	case IPPROTO_TCP:
		if ((ctinfo != IP_CT_ESTABLISHED &&
		     ctinfo != IP_CT_ESTABLISHED_REPLY) ||
		    !test_bit(IPS_ASSURED_BIT, &ct->status) ||
		    ct->proto.tcp.state != TCP_CONNTRACK_ESTABLISHED)
			return;

		tcp = true;
		break;
	case IPPROTO_UDP:
		if (!nf_ct_is_confirmed(ct))
			return;
		if (!test_bit(IPS_ASSURED_BIT, &ct->status))
			bidirectional = false;
		break;
#ifdef CONFIG_NF_CT_PROTO_GRE
	case IPPROTO_GRE: {
		struct nf_conntrack_tuple *tuple;

		if ((ctinfo != IP_CT_ESTABLISHED &&
		     ctinfo != IP_CT_ESTABLISHED_REPLY) ||
		    !test_bit(IPS_ASSURED_BIT, &ct->status) ||
		    ct->status & IPS_NAT_MASK)
			return;

		tuple = &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple;
		/* No support for GRE v1 */
		if (tuple->src.u.gre.key || tuple->dst.u.gre.key)
			return;
		break;
	}
#endif
	default:
		return;
	}

	if (nf_ct_ext_exist(ct, NF_CT_EXT_HELPER) ||
	    ct->status & IPS_SEQ_ADJUST)
		return;

	tcf_ct_flow_table_add(ct_ft, ct, tcp, bidirectional);
}

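/* Build a flow table lookup tuple from the skb headers. Fragments,
 * IPv4 options (thoff != 20) and packets with ttl/hop_limit <= 1 are
 * not matched and stay on the regular path.
 */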
static bool
tcf_ct_flow_table_fill_tuple_ipv4(struct sk_buff *skb,
				  struct flow_offload_tuple *tuple,
				  struct tcphdr **tcph)
{
	struct flow_ports *ports;
	unsigned int thoff;
	struct iphdr *iph;
	size_t hdrsize;
	u8 ipproto;

	if (!pskb_network_may_pull(skb, sizeof(*iph)))
		return false;

	iph = ip_hdr(skb);
	thoff = iph->ihl * 4;

	if (ip_is_fragment(iph) ||
	    unlikely(thoff != sizeof(struct iphdr)))
		return false;

	ipproto = iph->protocol;
	switch (ipproto) {
	case IPPROTO_TCP:
		hdrsize = sizeof(struct tcphdr);
		break;
	case IPPROTO_UDP:
		hdrsize = sizeof(*ports);
		break;
#ifdef CONFIG_NF_CT_PROTO_GRE
	case IPPROTO_GRE:
		hdrsize = sizeof(struct gre_base_hdr);
		break;
#endif
	default:
		return false;
	}

	if (iph->ttl <= 1)
		return false;

	if (!pskb_network_may_pull(skb, thoff + hdrsize))
		return false;

	switch (ipproto) {
	case IPPROTO_TCP:
		*tcph = (void *)(skb_network_header(skb) + thoff);
		fallthrough;
	case IPPROTO_UDP:
		ports = (struct flow_ports *)(skb_network_header(skb) + thoff);
		tuple->src_port = ports->source;
		tuple->dst_port = ports->dest;
		break;
	case IPPROTO_GRE: {
		struct gre_base_hdr *greh;

		greh = (struct gre_base_hdr *)(skb_network_header(skb) + thoff);
		if ((greh->flags & GRE_VERSION) != GRE_VERSION_0)
			return false;
		break;
	}
	}

	iph = ip_hdr(skb);

	tuple->src_v4.s_addr = iph->saddr;
	tuple->dst_v4.s_addr = iph->daddr;
	tuple->l3proto = AF_INET;
	tuple->l4proto = ipproto;

	return true;
}

static bool
tcf_ct_flow_table_fill_tuple_ipv6(struct sk_buff *skb,
				  struct flow_offload_tuple *tuple,
				  struct tcphdr **tcph)
{
	struct flow_ports *ports;
	struct ipv6hdr *ip6h;
	unsigned int thoff;
	size_t hdrsize;
	u8 nexthdr;

	if (!pskb_network_may_pull(skb, sizeof(*ip6h)))
		return false;

	ip6h = ipv6_hdr(skb);
	thoff = sizeof(*ip6h);

	nexthdr = ip6h->nexthdr;
	switch (nexthdr) {
	case IPPROTO_TCP:
		hdrsize = sizeof(struct tcphdr);
		break;
	case IPPROTO_UDP:
		hdrsize = sizeof(*ports);
		break;
#ifdef CONFIG_NF_CT_PROTO_GRE
	case IPPROTO_GRE:
		hdrsize = sizeof(struct gre_base_hdr);
		break;
#endif
	default:
		return false;
	}

	if (ip6h->hop_limit <= 1)
		return false;

	if (!pskb_network_may_pull(skb, thoff + hdrsize))
		return false;

	switch (nexthdr) {
	case IPPROTO_TCP:
		*tcph = (void *)(skb_network_header(skb) + thoff);
		fallthrough;
	case IPPROTO_UDP:
		ports = (struct flow_ports *)(skb_network_header(skb) + thoff);
		tuple->src_port = ports->source;
		tuple->dst_port = ports->dest;
		break;
	case IPPROTO_GRE: {
		struct gre_base_hdr *greh;

		greh = (struct gre_base_hdr *)(skb_network_header(skb) + thoff);
		if ((greh->flags & GRE_VERSION) != GRE_VERSION_0)
			return false;
		break;
	}
	}

	ip6h = ipv6_hdr(skb);

	tuple->src_v6 = ip6h->saddr;
	tuple->dst_v6 = ip6h->daddr;
	tuple->l3proto = AF_INET6;
	tuple->l4proto = nexthdr;

	return true;
}

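/* Software fastpath: look the skb up in the zone's flow table and, on a
 * hit, attach the conntrack entry directly instead of going through
 * nf_conntrack_in(). TCP FIN/RST tear the flow down, and flows that are
 * not yet assured keep going through software so they can be promoted.
 */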
static bool tcf_ct_flow_table_lookup(struct tcf_ct_params *p,
				     struct sk_buff *skb,
				     u8 family)
{
	struct nf_flowtable *nf_ft = &p->ct_ft->nf_ft;
	struct flow_offload_tuple_rhash *tuplehash;
	struct flow_offload_tuple tuple = {};
	enum ip_conntrack_info ctinfo;
	struct tcphdr *tcph = NULL;
	bool force_refresh = false;
	struct flow_offload *flow;
	struct nf_conn *ct;
	u8 dir;

	switch (family) {
	case NFPROTO_IPV4:
		if (!tcf_ct_flow_table_fill_tuple_ipv4(skb, &tuple, &tcph))
			return false;
		break;
	case NFPROTO_IPV6:
		if (!tcf_ct_flow_table_fill_tuple_ipv6(skb, &tuple, &tcph))
			return false;
		break;
	default:
		return false;
	}

	tuplehash = flow_offload_lookup(nf_ft, &tuple);
	if (!tuplehash)
		return false;

	dir = tuplehash->tuple.dir;
	flow = container_of(tuplehash, struct flow_offload, tuplehash[dir]);
	ct = flow->ct;

	if (dir == FLOW_OFFLOAD_DIR_REPLY &&
	    !test_bit(NF_FLOW_HW_BIDIRECTIONAL, &flow->flags)) {
		/* Only offload reply direction after connection became
		 * assured.
		 */
		if (test_bit(IPS_ASSURED_BIT, &ct->status))
			set_bit(NF_FLOW_HW_BIDIRECTIONAL, &flow->flags);
		else if (test_bit(NF_FLOW_HW_ESTABLISHED, &flow->flags))
			/* If flow_table flow has already been updated to the
			 * established state, then don't refresh.
			 */
			return false;
		force_refresh = true;
	}

	if (tcph && (unlikely(tcph->fin || tcph->rst))) {
		flow_offload_teardown(flow);
		return false;
	}

	if (dir == FLOW_OFFLOAD_DIR_ORIGINAL)
		ctinfo = test_bit(IPS_SEEN_REPLY_BIT, &ct->status) ?
			IP_CT_ESTABLISHED : IP_CT_NEW;
	else
		ctinfo = IP_CT_ESTABLISHED_REPLY;

	nf_conn_act_ct_ext_fill(skb, ct, ctinfo);
	tcf_ct_flow_ct_ext_ifidx_update(flow);
	flow_offload_refresh(nf_ft, flow, force_refresh);
	if (!test_bit(IPS_ASSURED_BIT, &ct->status)) {
		/* Process this flow in SW to allow promoting to ASSURED */
		return false;
	}

	nf_conntrack_get(&ct->ct_general);
	nf_ct_set(skb, ct, ctinfo);
	if (nf_ft->flags & NF_FLOWTABLE_COUNTER)
		nf_ct_acct_update(ct, dir, skb->len);

	return true;
}

static int tcf_ct_flow_tables_init(void)
{
	return rhashtable_init(&zones_ht, &zones_params);
}

static void tcf_ct_flow_tables_uninit(void)
{
	rhashtable_destroy(&zones_ht);
}

static struct tc_action_ops act_ct_ops;

struct tc_ct_action_net {
	struct tc_action_net tn; /* Must be first */
};

/* Determine whether skb->_nfct is equal to the result of conntrack lookup. */
static bool tcf_ct_skb_nfct_cached(struct net *net, struct sk_buff *skb,
				   struct tcf_ct_params *p)
{
	enum ip_conntrack_info ctinfo;
	struct nf_conn *ct;

	ct = nf_ct_get(skb, &ctinfo);
	if (!ct)
		return false;
	if (!net_eq(net, read_pnet(&ct->ct_net)))
		goto drop_ct;
	if (nf_ct_zone(ct)->id != p->zone)
		goto drop_ct;
	if (p->helper) {
		struct nf_conn_help *help;

		help = nf_ct_ext_find(ct, NF_CT_EXT_HELPER);
		if (help && rcu_access_pointer(help->helper) != p->helper)
			goto drop_ct;
	}

	/* Force conntrack entry direction. */
	if ((p->ct_action & TCA_CT_ACT_FORCE) &&
	    CTINFO2DIR(ctinfo) != IP_CT_DIR_ORIGINAL) {
		if (nf_ct_is_confirmed(ct))
			nf_ct_kill(ct);

		goto drop_ct;
	}

	return true;

drop_ct:
	nf_ct_put(ct);
	nf_ct_set(skb, NULL, IP_CT_UNTRACKED);

	return false;
}

static u8 tcf_ct_skb_nf_family(struct sk_buff *skb)
{
	u8 family = NFPROTO_UNSPEC;

	switch (skb_protocol(skb, true)) {
	case htons(ETH_P_IP):
		family = NFPROTO_IPV4;
		break;
	case htons(ETH_P_IPV6):
		family = NFPROTO_IPV6;
		break;
	default:
		break;
	}

	return family;
}

static int tcf_ct_ipv4_is_fragment(struct sk_buff *skb, bool *frag)
{
	unsigned int len;

	len = skb_network_offset(skb) + sizeof(struct iphdr);
	if (unlikely(skb->len < len))
		return -EINVAL;
	if (unlikely(!pskb_may_pull(skb, len)))
		return -ENOMEM;

	*frag = ip_is_fragment(ip_hdr(skb));
	return 0;
}

static int tcf_ct_ipv6_is_fragment(struct sk_buff *skb, bool *frag)
{
	unsigned int flags = 0, len, payload_ofs = 0;
	unsigned short frag_off;
	int nexthdr;

	len = skb_network_offset(skb) + sizeof(struct ipv6hdr);
	if (unlikely(skb->len < len))
		return -EINVAL;
	if (unlikely(!pskb_may_pull(skb, len)))
		return -ENOMEM;

	nexthdr = ipv6_find_hdr(skb, &payload_ofs, -1, &frag_off, &flags);
	if (unlikely(nexthdr < 0))
		return -EPROTO;

	*frag = flags & IP6_FH_F_FRAG;
	return 0;
}

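/* Defragment the packet if needed before conntrack sees it. The
 * original max receive unit is stashed in tc_skb_cb so that later
 * transmit paths can re-fragment accordingly.
 */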
static int tcf_ct_handle_fragments(struct net *net, struct sk_buff *skb,
				   u8 family, u16 zone, bool *defrag)
{
	enum ip_conntrack_info ctinfo;
	struct nf_conn *ct;
	int err = 0;
	bool frag;
	u8 proto;
	u16 mru;

	/* Previously seen (loopback)? Ignore. */
	ct = nf_ct_get(skb, &ctinfo);
	if ((ct && !nf_ct_is_template(ct)) || ctinfo == IP_CT_UNTRACKED)
		return 0;

	if (family == NFPROTO_IPV4)
		err = tcf_ct_ipv4_is_fragment(skb, &frag);
	else
		err = tcf_ct_ipv6_is_fragment(skb, &frag);
	if (err || !frag)
		return err;

	err = nf_ct_handle_fragments(net, skb, zone, family, &proto, &mru);
	if (err)
		return err;

	*defrag = true;
	tc_skb_cb(skb)->mru = mru;

	return 0;
}

static void tcf_ct_params_free(struct tcf_ct_params *params)
{
	if (params->helper) {
#if IS_ENABLED(CONFIG_NF_NAT)
		if (params->ct_action & TCA_CT_ACT_NAT)
			nf_nat_helper_put(params->helper);
#endif
		nf_conntrack_helper_put(params->helper);
	}
	if (params->ct_ft)
		tcf_ct_flow_table_put(params->ct_ft);
	if (params->tmpl) {
		if (params->put_labels)
			nf_connlabels_put(nf_ct_net(params->tmpl));

		nf_ct_put(params->tmpl);
	}

	kfree(params);
}

static void tcf_ct_params_free_rcu(struct rcu_head *head)
{
	struct tcf_ct_params *params;

	params = container_of(head, struct tcf_ct_params, rcu);
	tcf_ct_params_free(params);
}

static void tcf_ct_act_set_mark(struct nf_conn *ct, u32 mark, u32 mask)
{
#if IS_ENABLED(CONFIG_NF_CONNTRACK_MARK)
	u32 new_mark;

	if (!mask)
		return;

	new_mark = mark | (READ_ONCE(ct->mark) & ~(mask));
	if (READ_ONCE(ct->mark) != new_mark) {
		WRITE_ONCE(ct->mark, new_mark);
		if (nf_ct_is_confirmed(ct))
			nf_conntrack_event_cache(IPCT_MARK, ct);
	}
#endif
}

static void tcf_ct_act_set_labels(struct nf_conn *ct,
				  u32 *labels,
				  u32 *labels_m)
{
#if IS_ENABLED(CONFIG_NF_CONNTRACK_LABELS)
	size_t labels_sz = sizeof_field(struct tcf_ct_params, labels);

	if (!memchr_inv(labels_m, 0, labels_sz))
		return;

	nf_connlabels_replace(ct, labels, labels_m, 4);
#endif
}

static int tcf_ct_act_nat(struct sk_buff *skb,
			  struct nf_conn *ct,
			  enum ip_conntrack_info ctinfo,
			  int ct_action,
			  struct nf_nat_range2 *range,
			  bool commit)
{
#if IS_ENABLED(CONFIG_NF_NAT)
	int err, action = 0;

	if (!(ct_action & TCA_CT_ACT_NAT))
		return NF_ACCEPT;
	if (ct_action & TCA_CT_ACT_NAT_SRC)
		action |= BIT(NF_NAT_MANIP_SRC);
	if (ct_action & TCA_CT_ACT_NAT_DST)
		action |= BIT(NF_NAT_MANIP_DST);

	err = nf_ct_nat(skb, ct, ctinfo, &action, range, commit);

	if (action & BIT(NF_NAT_MANIP_SRC))
		tc_skb_cb(skb)->post_ct_snat = 1;
	if (action & BIT(NF_NAT_MANIP_DST))
		tc_skb_cb(skb)->post_ct_dnat = 1;

	return err;
#else
	return NF_ACCEPT;
#endif
}

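/* Main packet path of act_ct: handle "clear", defragment, then either
 * reuse a cached/offloaded conntrack state or run nf_conntrack_in();
 * apply NAT and helpers, optionally commit (confirm) the connection,
 * and finally consider it for flow table offload.
 */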
TC_INDIRECT_SCOPE int tcf_ct_act(struct sk_buff *skb, const struct tc_action *a,
				 struct tcf_result *res)
{
	struct net *net = dev_net(skb->dev);
	enum ip_conntrack_info ctinfo;
	struct tcf_ct *c = to_ct(a);
	struct nf_conn *tmpl = NULL;
	struct nf_hook_state state;
	bool cached, commit, clear;
	int nh_ofs, err, retval;
	struct tcf_ct_params *p;
	bool add_helper = false;
	bool skip_add = false;
	bool defrag = false;
	struct nf_conn *ct;
	u8 family;

	p = rcu_dereference_bh(c->params);

	retval = READ_ONCE(c->tcf_action);
	commit = p->ct_action & TCA_CT_ACT_COMMIT;
	clear = p->ct_action & TCA_CT_ACT_CLEAR;
	tmpl = p->tmpl;

	tcf_lastuse_update(&c->tcf_tm);
	tcf_action_update_bstats(&c->common, skb);

	if (clear) {
		tc_skb_cb(skb)->post_ct = false;
		ct = nf_ct_get(skb, &ctinfo);
		if (ct) {
			nf_ct_put(ct);
			nf_ct_set(skb, NULL, IP_CT_UNTRACKED);
		}

		goto out_clear;
	}

	family = tcf_ct_skb_nf_family(skb);
	if (family == NFPROTO_UNSPEC)
		goto drop;

	/* The conntrack module expects to be working at L3.
	 * We also try to pull the IPv4/6 header into the linear area.
	 */
	nh_ofs = skb_network_offset(skb);
	skb_pull_rcsum(skb, nh_ofs);
	err = tcf_ct_handle_fragments(net, skb, family, p->zone, &defrag);
	if (err)
		goto out_frag;

	err = nf_ct_skb_network_trim(skb, family);
	if (err)
		goto drop;

	/* If we are recirculating packets to match on ct fields and
	 * committing with a separate ct action, then we don't need to
	 * actually run the packet through conntrack twice unless it's for a
	 * different zone.
	 */
	cached = tcf_ct_skb_nfct_cached(net, skb, p);
	if (!cached) {
		if (tcf_ct_flow_table_lookup(p, skb, family)) {
			skip_add = true;
			goto do_nat;
		}

		/* Associate skb with specified zone. */
		if (tmpl) {
			nf_conntrack_put(skb_nfct(skb));
			nf_conntrack_get(&tmpl->ct_general);
			nf_ct_set(skb, tmpl, IP_CT_NEW);
		}

		state.hook = NF_INET_PRE_ROUTING;
		state.net = net;
		state.pf = family;
		err = nf_conntrack_in(skb, &state);
		if (err != NF_ACCEPT)
			goto out_push;
	}

do_nat:
	ct = nf_ct_get(skb, &ctinfo);
	if (!ct)
		goto out_push;
	nf_ct_deliver_cached_events(ct);
	nf_conn_act_ct_ext_fill(skb, ct, ctinfo);

	err = tcf_ct_act_nat(skb, ct, ctinfo, p->ct_action, &p->range, commit);
	if (err != NF_ACCEPT)
		goto drop;

	if (!nf_ct_is_confirmed(ct) && commit && p->helper && !nfct_help(ct)) {
		err = __nf_ct_try_assign_helper(ct, p->tmpl, GFP_ATOMIC);
		if (err)
			goto drop;
		add_helper = true;
		if (p->ct_action & TCA_CT_ACT_NAT && !nfct_seqadj(ct)) {
			if (!nfct_seqadj_ext_add(ct))
				goto drop;
		}
	}

	if (nf_ct_is_confirmed(ct) ? ((!cached && !skip_add) || add_helper) : commit) {
		if (nf_ct_helper(skb, ct, ctinfo, family) != NF_ACCEPT)
			goto drop;
	}

	if (commit) {
		tcf_ct_act_set_mark(ct, p->mark, p->mark_mask);
		tcf_ct_act_set_labels(ct, p->labels, p->labels_mask);

		if (!nf_ct_is_confirmed(ct))
			nf_conn_act_ct_ext_add(skb, ct, ctinfo);

		/* This will take care of sending queued events
		 * even if the connection is already confirmed.
		 */
		if (nf_conntrack_confirm(skb) != NF_ACCEPT)
			goto drop;
	}

	if (!skip_add)
		tcf_ct_flow_table_process_conn(p->ct_ft, ct, ctinfo);

out_push:
	skb_push_rcsum(skb, nh_ofs);

	tc_skb_cb(skb)->post_ct = true;
	tc_skb_cb(skb)->zone = p->zone;
out_clear:
	if (defrag)
		qdisc_skb_cb(skb)->pkt_len = skb->len;
	return retval;

out_frag:
	if (err != -EINPROGRESS)
		tcf_action_inc_drop_qstats(&c->common);
	return TC_ACT_CONSUMED;

drop:
	tcf_action_inc_drop_qstats(&c->common);
	return TC_ACT_SHOT;
}

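/* Netlink policy for the ct action attributes below. As a usage sketch
 * (assumed iproute2 syntax, not part of this file), committing flows
 * into zone 1 with a mark could look like:
 *
 *   tc filter add dev eth0 ingress proto ip flower \
 *           action ct zone 1 commit mark 0xbb pipe \
 *           action goto chain 1
 */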
static const struct nla_policy ct_policy[TCA_CT_MAX + 1] = {
	[TCA_CT_ACTION] = { .type = NLA_U16 },
	[TCA_CT_PARMS] = NLA_POLICY_EXACT_LEN(sizeof(struct tc_ct)),
	[TCA_CT_ZONE] = { .type = NLA_U16 },
	[TCA_CT_MARK] = { .type = NLA_U32 },
	[TCA_CT_MARK_MASK] = { .type = NLA_U32 },
	[TCA_CT_LABELS] = { .type = NLA_BINARY,
			    .len = 128 / BITS_PER_BYTE },
	[TCA_CT_LABELS_MASK] = { .type = NLA_BINARY,
				 .len = 128 / BITS_PER_BYTE },
	[TCA_CT_NAT_IPV4_MIN] = { .type = NLA_U32 },
	[TCA_CT_NAT_IPV4_MAX] = { .type = NLA_U32 },
	[TCA_CT_NAT_IPV6_MIN] = NLA_POLICY_EXACT_LEN(sizeof(struct in6_addr)),
	[TCA_CT_NAT_IPV6_MAX] = NLA_POLICY_EXACT_LEN(sizeof(struct in6_addr)),
	[TCA_CT_NAT_PORT_MIN] = { .type = NLA_U16 },
	[TCA_CT_NAT_PORT_MAX] = { .type = NLA_U16 },
	[TCA_CT_HELPER_NAME] = { .type = NLA_STRING, .len = NF_CT_HELPER_NAME_LEN },
	[TCA_CT_HELPER_FAMILY] = { .type = NLA_U8 },
	[TCA_CT_HELPER_PROTO] = { .type = NLA_U8 },
};

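/* Parse the NAT attributes into an nf_nat_range2. Only one of SNAT or
 * DNAT may be requested per action, and missing range maxima default
 * to the corresponding minima.
 */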
static int tcf_ct_fill_params_nat(struct tcf_ct_params *p,
				  struct tc_ct *parm,
				  struct nlattr **tb,
				  struct netlink_ext_ack *extack)
{
	struct nf_nat_range2 *range;

	if (!(p->ct_action & TCA_CT_ACT_NAT))
		return 0;

	if (!IS_ENABLED(CONFIG_NF_NAT)) {
		NL_SET_ERR_MSG_MOD(extack, "Netfilter nat isn't enabled in kernel");
		return -EOPNOTSUPP;
	}

	if (!(p->ct_action & (TCA_CT_ACT_NAT_SRC | TCA_CT_ACT_NAT_DST)))
		return 0;

	if ((p->ct_action & TCA_CT_ACT_NAT_SRC) &&
	    (p->ct_action & TCA_CT_ACT_NAT_DST)) {
		NL_SET_ERR_MSG_MOD(extack, "dnat and snat can't be enabled at the same time");
		return -EOPNOTSUPP;
	}

	range = &p->range;
	if (tb[TCA_CT_NAT_IPV4_MIN]) {
		struct nlattr *max_attr = tb[TCA_CT_NAT_IPV4_MAX];

		p->ipv4_range = true;
		range->flags |= NF_NAT_RANGE_MAP_IPS;
		range->min_addr.ip =
			nla_get_in_addr(tb[TCA_CT_NAT_IPV4_MIN]);

		range->max_addr.ip = max_attr ?
				     nla_get_in_addr(max_attr) :
				     range->min_addr.ip;
	} else if (tb[TCA_CT_NAT_IPV6_MIN]) {
		struct nlattr *max_attr = tb[TCA_CT_NAT_IPV6_MAX];

		p->ipv4_range = false;
		range->flags |= NF_NAT_RANGE_MAP_IPS;
		range->min_addr.in6 =
			nla_get_in6_addr(tb[TCA_CT_NAT_IPV6_MIN]);

		range->max_addr.in6 = max_attr ?
				      nla_get_in6_addr(max_attr) :
				      range->min_addr.in6;
	}

	if (tb[TCA_CT_NAT_PORT_MIN]) {
		range->flags |= NF_NAT_RANGE_PROTO_SPECIFIED;
		range->min_proto.all = nla_get_be16(tb[TCA_CT_NAT_PORT_MIN]);

		range->max_proto.all = tb[TCA_CT_NAT_PORT_MAX] ?
				       nla_get_be16(tb[TCA_CT_NAT_PORT_MAX]) :
				       range->min_proto.all;
	}

	return 0;
}

static void tcf_ct_set_key_val(struct nlattr **tb,
			       void *val, int val_type,
			       void *mask, int mask_type,
			       int len)
{
	if (!tb[val_type])
		return;
	nla_memcpy(val, tb[val_type], len);

	if (!mask)
		return;

	if (mask_type == TCA_CT_UNSPEC || !tb[mask_type])
		memset(mask, 0xff, len);
	else
		nla_memcpy(mask, tb[mask_type], len);
}

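/* Parse all ct action parameters: action flags, NAT range, mark, labels
 * and zone, then allocate the conntrack template (and optional helper)
 * used to associate packets with the configured zone.
 */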
static int tcf_ct_fill_params(struct net *net,
			      struct tcf_ct_params *p,
			      struct tc_ct *parm,
			      struct nlattr **tb,
			      struct netlink_ext_ack *extack)
{
	struct nf_conntrack_zone zone;
	int err, family, proto, len;
	bool put_labels = false;
	struct nf_conn *tmpl;
	char *name;

	p->zone = NF_CT_DEFAULT_ZONE_ID;

	tcf_ct_set_key_val(tb,
			   &p->ct_action, TCA_CT_ACTION,
			   NULL, TCA_CT_UNSPEC,
			   sizeof(p->ct_action));

	if (p->ct_action & TCA_CT_ACT_CLEAR)
		return 0;

	err = tcf_ct_fill_params_nat(p, parm, tb, extack);
	if (err)
		return err;

	if (tb[TCA_CT_MARK]) {
		if (!IS_ENABLED(CONFIG_NF_CONNTRACK_MARK)) {
			NL_SET_ERR_MSG_MOD(extack, "Conntrack mark isn't enabled.");
			return -EOPNOTSUPP;
		}
		tcf_ct_set_key_val(tb,
				   &p->mark, TCA_CT_MARK,
				   &p->mark_mask, TCA_CT_MARK_MASK,
				   sizeof(p->mark));
	}

	if (tb[TCA_CT_LABELS]) {
		unsigned int n_bits = sizeof_field(struct tcf_ct_params, labels) * 8;

		if (!IS_ENABLED(CONFIG_NF_CONNTRACK_LABELS)) {
			NL_SET_ERR_MSG_MOD(extack, "Conntrack labels isn't enabled.");
			return -EOPNOTSUPP;
		}

		if (nf_connlabels_get(net, n_bits - 1)) {
			NL_SET_ERR_MSG_MOD(extack, "Failed to set connlabel length");
			return -EOPNOTSUPP;
		} else {
			put_labels = true;
		}

		tcf_ct_set_key_val(tb,
				   p->labels, TCA_CT_LABELS,
				   p->labels_mask, TCA_CT_LABELS_MASK,
				   sizeof(p->labels));
	}

	if (tb[TCA_CT_ZONE]) {
		if (!IS_ENABLED(CONFIG_NF_CONNTRACK_ZONES)) {
			NL_SET_ERR_MSG_MOD(extack, "Conntrack zones isn't enabled.");
			return -EOPNOTSUPP;
		}

		tcf_ct_set_key_val(tb,
				   &p->zone, TCA_CT_ZONE,
				   NULL, TCA_CT_UNSPEC,
				   sizeof(p->zone));
	}

	nf_ct_zone_init(&zone, p->zone, NF_CT_DEFAULT_ZONE_DIR, 0);
	tmpl = nf_ct_tmpl_alloc(net, &zone, GFP_KERNEL);
	if (!tmpl) {
		NL_SET_ERR_MSG_MOD(extack, "Failed to allocate conntrack template");
		return -ENOMEM;
	}
	p->tmpl = tmpl;
	if (tb[TCA_CT_HELPER_NAME]) {
		name = nla_data(tb[TCA_CT_HELPER_NAME]);
		len = nla_len(tb[TCA_CT_HELPER_NAME]);
		if (len > 16 || name[len - 1] != '\0') {
			NL_SET_ERR_MSG_MOD(extack, "Failed to parse helper name.");
			err = -EINVAL;
			goto err;
		}
		family = tb[TCA_CT_HELPER_FAMILY] ? nla_get_u8(tb[TCA_CT_HELPER_FAMILY]) : AF_INET;
		proto = tb[TCA_CT_HELPER_PROTO] ? nla_get_u8(tb[TCA_CT_HELPER_PROTO]) : IPPROTO_TCP;
		err = nf_ct_add_helper(tmpl, name, family, proto,
				       p->ct_action & TCA_CT_ACT_NAT, &p->helper);
		if (err) {
			NL_SET_ERR_MSG_MOD(extack, "Failed to add helper");
			goto err;
		}
	}

	p->put_labels = put_labels;

	if (p->ct_action & TCA_CT_ACT_COMMIT)
		__set_bit(IPS_CONFIRMED_BIT, &tmpl->status);
	return 0;
err:
	if (put_labels)
		nf_connlabels_put(net);

	nf_ct_put(p->tmpl);
	p->tmpl = NULL;
	return err;
}

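/* Action ->init() callback: validate the netlink attributes, create or
 * look up the action instance, build a fresh tcf_ct_params and swap it
 * in under tcf_lock, freeing the old parameters via RCU.
 */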
static int tcf_ct_init(struct net *net, struct nlattr *nla,
		       struct nlattr *est, struct tc_action **a,
		       struct tcf_proto *tp, u32 flags,
		       struct netlink_ext_ack *extack)
{
	struct tc_action_net *tn = net_generic(net, act_ct_ops.net_id);
	bool bind = flags & TCA_ACT_FLAGS_BIND;
	struct tcf_ct_params *params = NULL;
	struct nlattr *tb[TCA_CT_MAX + 1];
	struct tcf_chain *goto_ch = NULL;
	struct tc_ct *parm;
	struct tcf_ct *c;
	int err, res = 0;
	u32 index;

	if (!nla) {
		NL_SET_ERR_MSG_MOD(extack, "Ct requires attributes to be passed");
		return -EINVAL;
	}

	err = nla_parse_nested(tb, TCA_CT_MAX, nla, ct_policy, extack);
	if (err < 0)
		return err;

	if (!tb[TCA_CT_PARMS]) {
		NL_SET_ERR_MSG_MOD(extack, "Missing required ct parameters");
		return -EINVAL;
	}
	parm = nla_data(tb[TCA_CT_PARMS]);
	index = parm->index;
	err = tcf_idr_check_alloc(tn, &index, a, bind);
	if (err < 0)
		return err;

	if (!err) {
		err = tcf_idr_create_from_flags(tn, index, est, a,
						&act_ct_ops, bind, flags);
		if (err) {
			tcf_idr_cleanup(tn, index);
			return err;
		}
		res = ACT_P_CREATED;
	} else {
		if (bind)
			return ACT_P_BOUND;

		if (!(flags & TCA_ACT_FLAGS_REPLACE)) {
			tcf_idr_release(*a, bind);
			return -EEXIST;
		}
	}
	err = tcf_action_check_ctrlact(parm->action, tp, &goto_ch, extack);
	if (err < 0)
		goto cleanup;

	c = to_ct(*a);

	params = kzalloc(sizeof(*params), GFP_KERNEL);
	if (unlikely(!params)) {
		err = -ENOMEM;
		goto cleanup;
	}

	err = tcf_ct_fill_params(net, params, parm, tb, extack);
	if (err)
		goto cleanup;

	err = tcf_ct_flow_table_get(net, params);
	if (err)
		goto cleanup;

	spin_lock_bh(&c->tcf_lock);
	goto_ch = tcf_action_set_ctrlact(*a, parm->action, goto_ch);
	params = rcu_replace_pointer(c->params, params,
				     lockdep_is_held(&c->tcf_lock));
	spin_unlock_bh(&c->tcf_lock);

	if (goto_ch)
		tcf_chain_put_by_act(goto_ch);
	if (params)
		call_rcu(&params->rcu, tcf_ct_params_free_rcu);

	return res;

cleanup:
	if (goto_ch)
		tcf_chain_put_by_act(goto_ch);
	if (params)
		tcf_ct_params_free(params);
	tcf_idr_release(*a, bind);
	return err;
}

static void tcf_ct_cleanup(struct tc_action *a)
{
	struct tcf_ct_params *params;
	struct tcf_ct *c = to_ct(a);

	params = rcu_dereference_protected(c->params, 1);
	if (params)
		call_rcu(&params->rcu, tcf_ct_params_free_rcu);
}

static int tcf_ct_dump_key_val(struct sk_buff *skb,
			       void *val, int val_type,
			       void *mask, int mask_type,
			       int len)
{
	int err;

	if (mask && !memchr_inv(mask, 0, len))
		return 0;

	err = nla_put(skb, val_type, len, val);
	if (err)
		return err;

	if (mask_type != TCA_CT_UNSPEC) {
		err = nla_put(skb, mask_type, len, mask);
		if (err)
			return err;
	}

	return 0;
}

static int tcf_ct_dump_nat(struct sk_buff *skb, struct tcf_ct_params *p)
{
	struct nf_nat_range2 *range = &p->range;

	if (!(p->ct_action & TCA_CT_ACT_NAT))
		return 0;

	if (!(p->ct_action & (TCA_CT_ACT_NAT_SRC | TCA_CT_ACT_NAT_DST)))
		return 0;

	if (range->flags & NF_NAT_RANGE_MAP_IPS) {
		if (p->ipv4_range) {
			if (nla_put_in_addr(skb, TCA_CT_NAT_IPV4_MIN,
					    range->min_addr.ip))
				return -1;
			if (nla_put_in_addr(skb, TCA_CT_NAT_IPV4_MAX,
					    range->max_addr.ip))
				return -1;
		} else {
			if (nla_put_in6_addr(skb, TCA_CT_NAT_IPV6_MIN,
					     &range->min_addr.in6))
				return -1;
			if (nla_put_in6_addr(skb, TCA_CT_NAT_IPV6_MAX,
					     &range->max_addr.in6))
				return -1;
		}
	}

	if (range->flags & NF_NAT_RANGE_PROTO_SPECIFIED) {
		if (nla_put_be16(skb, TCA_CT_NAT_PORT_MIN,
				 range->min_proto.all))
			return -1;
		if (nla_put_be16(skb, TCA_CT_NAT_PORT_MAX,
				 range->max_proto.all))
			return -1;
	}

	return 0;
}

static int tcf_ct_dump_helper(struct sk_buff *skb, struct nf_conntrack_helper *helper)
{
	if (!helper)
		return 0;

	if (nla_put_string(skb, TCA_CT_HELPER_NAME, helper->name) ||
	    nla_put_u8(skb, TCA_CT_HELPER_FAMILY, helper->tuple.src.l3num) ||
	    nla_put_u8(skb, TCA_CT_HELPER_PROTO, helper->tuple.dst.protonum))
		return -1;

	return 0;
}

static inline int tcf_ct_dump(struct sk_buff *skb, struct tc_action *a,
			      int bind, int ref)
{
	unsigned char *b = skb_tail_pointer(skb);
	struct tcf_ct *c = to_ct(a);
	struct tcf_ct_params *p;

	struct tc_ct opt = {
		.index   = c->tcf_index,
		.refcnt  = refcount_read(&c->tcf_refcnt) - ref,
		.bindcnt = atomic_read(&c->tcf_bindcnt) - bind,
	};
	struct tcf_t t;

	spin_lock_bh(&c->tcf_lock);
	p = rcu_dereference_protected(c->params,
				      lockdep_is_held(&c->tcf_lock));
	opt.action = c->tcf_action;

	if (tcf_ct_dump_key_val(skb,
				&p->ct_action, TCA_CT_ACTION,
				NULL, TCA_CT_UNSPEC,
				sizeof(p->ct_action)))
		goto nla_put_failure;

	if (p->ct_action & TCA_CT_ACT_CLEAR)
		goto skip_dump;

	if (IS_ENABLED(CONFIG_NF_CONNTRACK_MARK) &&
	    tcf_ct_dump_key_val(skb,
				&p->mark, TCA_CT_MARK,
				&p->mark_mask, TCA_CT_MARK_MASK,
				sizeof(p->mark)))
		goto nla_put_failure;

	if (IS_ENABLED(CONFIG_NF_CONNTRACK_LABELS) &&
	    tcf_ct_dump_key_val(skb,
				p->labels, TCA_CT_LABELS,
				p->labels_mask, TCA_CT_LABELS_MASK,
				sizeof(p->labels)))
		goto nla_put_failure;

	if (IS_ENABLED(CONFIG_NF_CONNTRACK_ZONES) &&
	    tcf_ct_dump_key_val(skb,
				&p->zone, TCA_CT_ZONE,
				NULL, TCA_CT_UNSPEC,
				sizeof(p->zone)))
		goto nla_put_failure;

	if (tcf_ct_dump_nat(skb, p))
		goto nla_put_failure;

	if (tcf_ct_dump_helper(skb, p->helper))
		goto nla_put_failure;

skip_dump:
	if (nla_put(skb, TCA_CT_PARMS, sizeof(opt), &opt))
		goto nla_put_failure;

	tcf_tm_dump(&t, &c->tcf_tm);
	if (nla_put_64bit(skb, TCA_CT_TM, sizeof(t), &t, TCA_CT_PAD))
		goto nla_put_failure;
	spin_unlock_bh(&c->tcf_lock);

	return skb->len;
nla_put_failure:
	spin_unlock_bh(&c->tcf_lock);
	nlmsg_trim(skb, b);
	return -1;
}

static void tcf_stats_update(struct tc_action *a, u64 bytes, u64 packets,
			     u64 drops, u64 lastuse, bool hw)
{
	struct tcf_ct *c = to_ct(a);

	tcf_action_update_stats(a, bytes, packets, drops, hw);
	c->tcf_tm.lastuse = max_t(u64, c->tcf_tm.lastuse, lastuse);
}

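/* Set up hw offload of the ct action itself. Actions with a conntrack
 * helper cannot be offloaded; otherwise expose the action flags, zone
 * and zone flow table to the driver.
 */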
static int tcf_ct_offload_act_setup(struct tc_action *act, void *entry_data,
				    u32 *index_inc, bool bind,
				    struct netlink_ext_ack *extack)
{
	if (bind) {
		struct flow_action_entry *entry = entry_data;

		if (tcf_ct_helper(act))
			return -EOPNOTSUPP;

		entry->id = FLOW_ACTION_CT;
		entry->ct.action = tcf_ct_action(act);
		entry->ct.zone = tcf_ct_zone(act);
		entry->ct.flow_table = tcf_ct_ft(act);
		*index_inc = 1;
	} else {
		struct flow_offload_action *fl_action = entry_data;

		fl_action->id = FLOW_ACTION_CT;
	}

	return 0;
}

static struct tc_action_ops act_ct_ops = {
	.kind		=	"ct",
	.id		=	TCA_ID_CT,
	.owner		=	THIS_MODULE,
	.act		=	tcf_ct_act,
	.dump		=	tcf_ct_dump,
	.init		=	tcf_ct_init,
	.cleanup	=	tcf_ct_cleanup,
	.stats_update	=	tcf_stats_update,
	.offload_act_setup =	tcf_ct_offload_act_setup,
	.size		=	sizeof(struct tcf_ct),
};

static __net_init int ct_init_net(struct net *net)
{
	struct tc_ct_action_net *tn = net_generic(net, act_ct_ops.net_id);

	return tc_action_net_init(net, &tn->tn, &act_ct_ops);
}

static void __net_exit ct_exit_net(struct list_head *net_list)
{
	tc_action_net_exit(net_list, act_ct_ops.net_id);
}

static struct pernet_operations ct_net_ops = {
	.init = ct_init_net,
	.exit_batch = ct_exit_net,
	.id   = &act_ct_ops.net_id,
	.size = sizeof(struct tc_ct_action_net),
};

static int __init ct_init_module(void)
{
	int err;

	act_ct_wq = alloc_ordered_workqueue("act_ct_workqueue", 0);
	if (!act_ct_wq)
		return -ENOMEM;

	err = tcf_ct_flow_tables_init();
	if (err)
		goto err_tbl_init;

	err = tcf_register_action(&act_ct_ops, &ct_net_ops);
	if (err)
		goto err_register;

	static_branch_inc(&tcf_frag_xmit_count);

	return 0;

err_register:
	tcf_ct_flow_tables_uninit();
err_tbl_init:
	destroy_workqueue(act_ct_wq);
	return err;
}

static void __exit ct_cleanup_module(void)
{
	static_branch_dec(&tcf_frag_xmit_count);
	tcf_unregister_action(&act_ct_ops, &ct_net_ops);
	tcf_ct_flow_tables_uninit();
	destroy_workqueue(act_ct_wq);
}

module_init(ct_init_module);
module_exit(ct_cleanup_module);
MODULE_AUTHOR("Paul Blakey <paulb@mellanox.com>");
MODULE_AUTHOR("Yossi Kuperman <yossiku@mellanox.com>");
MODULE_AUTHOR("Marcelo Ricardo Leitner <marcelo.leitner@gmail.com>");
MODULE_DESCRIPTION("Connection tracking action");
MODULE_LICENSE("GPL v2");