// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* -
 * net/sched/act_ct.c  Connection Tracking action
 *
 * Authors:   Paul Blakey <paulb@mellanox.com>
 *            Yossi Kuperman <yossiku@mellanox.com>
 *            Marcelo Ricardo Leitner <marcelo.leitner@gmail.com>
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/pkt_cls.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/rhashtable.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>
#include <net/act_api.h>
#include <net/ip.h>
#include <net/ipv6_frag.h>
#include <uapi/linux/tc_act/tc_ct.h>
#include <net/tc_act/tc_ct.h>
#include <net/tc_wrapper.h>

#include <net/netfilter/nf_flow_table.h>
#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_core.h>
#include <net/netfilter/nf_conntrack_zones.h>
#include <net/netfilter/nf_conntrack_helper.h>
#include <net/netfilter/nf_conntrack_acct.h>
#include <net/netfilter/ipv6/nf_defrag_ipv6.h>
#include <net/netfilter/nf_conntrack_act_ct.h>
#include <net/netfilter/nf_conntrack_seqadj.h>
#include <uapi/linux/netfilter/nf_nat.h>

static struct workqueue_struct *act_ct_wq;
static struct rhashtable zones_ht;
static DEFINE_MUTEX(zones_mutex);

struct tcf_ct_flow_table {
	struct rhash_head node; /* In zones tables */

	struct rcu_work rwork;
	struct nf_flowtable nf_ft;
	refcount_t ref;
	u16 zone;

	bool dying;
};

static const struct rhashtable_params zones_params = {
	.head_offset = offsetof(struct tcf_ct_flow_table, node),
	.key_offset = offsetof(struct tcf_ct_flow_table, zone),
	.key_len = sizeof_field(struct tcf_ct_flow_table, zone),
	.automatic_shrinking = true,
};
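
/* Flow tables are shared per conntrack zone: zones_ht maps a zone id to a
 * refcounted tcf_ct_flow_table, so every ct action instance in the same zone
 * reuses one nf_flowtable (see tcf_ct_flow_table_get()/_put() below).
 */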

static struct flow_action_entry *
tcf_ct_flow_table_flow_action_get_next(struct flow_action *flow_action)
{
	int i = flow_action->num_entries++;

	return &flow_action->entries[i];
}

static void tcf_ct_add_mangle_action(struct flow_action *action,
				     enum flow_action_mangle_base htype,
				     u32 offset,
				     u32 mask,
				     u32 val)
{
	struct flow_action_entry *entry;

	entry = tcf_ct_flow_table_flow_action_get_next(action);
	entry->id = FLOW_ACTION_MANGLE;
	entry->mangle.htype = htype;
	entry->mangle.mask = ~mask;
	entry->mangle.offset = offset;
	entry->mangle.val = val;
}

/* The following nat helper functions check if the inverted reverse tuple
 * (target) is different from the current dir tuple - meaning nat for ports
 * and/or ip is needed, and add the relevant mangle actions.
 */
static void
tcf_ct_flow_table_add_action_nat_ipv4(const struct nf_conntrack_tuple *tuple,
				      struct nf_conntrack_tuple target,
				      struct flow_action *action)
{
	if (memcmp(&target.src.u3, &tuple->src.u3, sizeof(target.src.u3)))
		tcf_ct_add_mangle_action(action, FLOW_ACT_MANGLE_HDR_TYPE_IP4,
					 offsetof(struct iphdr, saddr),
					 0xFFFFFFFF,
					 be32_to_cpu(target.src.u3.ip));
	if (memcmp(&target.dst.u3, &tuple->dst.u3, sizeof(target.dst.u3)))
		tcf_ct_add_mangle_action(action, FLOW_ACT_MANGLE_HDR_TYPE_IP4,
					 offsetof(struct iphdr, daddr),
					 0xFFFFFFFF,
					 be32_to_cpu(target.dst.u3.ip));
}
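
/* Illustrative example (addresses are hypothetical, not from this file): for
 * a flow SNAT'ed from 10.0.0.1 to 192.0.2.1, the inverted reply tuple yields
 * target.src.u3.ip == 192.0.2.1, which differs from the original tuple, so a
 * single mangle entry is emitted: htype IP4, offset
 * offsetof(struct iphdr, saddr), mangle.mask = ~0xFFFFFFFF = 0 (no original
 * bits preserved, the full 32-bit address is rewritten), val 192.0.2.1.
 */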

static void
tcf_ct_add_ipv6_addr_mangle_action(struct flow_action *action,
				   union nf_inet_addr *addr,
				   u32 offset)
{
	int i;

	for (i = 0; i < sizeof(struct in6_addr) / sizeof(u32); i++)
		tcf_ct_add_mangle_action(action, FLOW_ACT_MANGLE_HDR_TYPE_IP6,
					 i * sizeof(u32) + offset,
					 0xFFFFFFFF, be32_to_cpu(addr->ip6[i]));
}

static void
tcf_ct_flow_table_add_action_nat_ipv6(const struct nf_conntrack_tuple *tuple,
				      struct nf_conntrack_tuple target,
				      struct flow_action *action)
{
	if (memcmp(&target.src.u3, &tuple->src.u3, sizeof(target.src.u3)))
		tcf_ct_add_ipv6_addr_mangle_action(action, &target.src.u3,
						   offsetof(struct ipv6hdr,
							    saddr));
	if (memcmp(&target.dst.u3, &tuple->dst.u3, sizeof(target.dst.u3)))
		tcf_ct_add_ipv6_addr_mangle_action(action, &target.dst.u3,
						   offsetof(struct ipv6hdr,
							    daddr));
}

static void
tcf_ct_flow_table_add_action_nat_tcp(const struct nf_conntrack_tuple *tuple,
				     struct nf_conntrack_tuple target,
				     struct flow_action *action)
{
	__be16 target_src = target.src.u.tcp.port;
	__be16 target_dst = target.dst.u.tcp.port;

	if (target_src != tuple->src.u.tcp.port)
		tcf_ct_add_mangle_action(action, FLOW_ACT_MANGLE_HDR_TYPE_TCP,
					 offsetof(struct tcphdr, source),
					 0xFFFF, be16_to_cpu(target_src));
	if (target_dst != tuple->dst.u.tcp.port)
		tcf_ct_add_mangle_action(action, FLOW_ACT_MANGLE_HDR_TYPE_TCP,
					 offsetof(struct tcphdr, dest),
					 0xFFFF, be16_to_cpu(target_dst));
}

static void
tcf_ct_flow_table_add_action_nat_udp(const struct nf_conntrack_tuple *tuple,
				     struct nf_conntrack_tuple target,
				     struct flow_action *action)
{
	__be16 target_src = target.src.u.udp.port;
	__be16 target_dst = target.dst.u.udp.port;

	if (target_src != tuple->src.u.udp.port)
		tcf_ct_add_mangle_action(action, FLOW_ACT_MANGLE_HDR_TYPE_UDP,
					 offsetof(struct udphdr, source),
					 0xFFFF, be16_to_cpu(target_src));
	if (target_dst != tuple->dst.u.udp.port)
		tcf_ct_add_mangle_action(action, FLOW_ACT_MANGLE_HDR_TYPE_UDP,
					 offsetof(struct udphdr, dest),
					 0xFFFF, be16_to_cpu(target_dst));
}

static void tcf_ct_flow_table_add_action_meta(struct nf_conn *ct,
					      enum ip_conntrack_dir dir,
					      struct flow_action *action)
{
	struct nf_conn_labels *ct_labels;
	struct flow_action_entry *entry;
	enum ip_conntrack_info ctinfo;
	u32 *act_ct_labels;

	entry = tcf_ct_flow_table_flow_action_get_next(action);
	entry->id = FLOW_ACTION_CT_METADATA;
#if IS_ENABLED(CONFIG_NF_CONNTRACK_MARK)
	entry->ct_metadata.mark = READ_ONCE(ct->mark);
#endif
	ctinfo = dir == IP_CT_DIR_ORIGINAL ? IP_CT_ESTABLISHED :
					     IP_CT_ESTABLISHED_REPLY;
	/* aligns with the CT reference on the SKB nf_ct_set */
	entry->ct_metadata.cookie = (unsigned long)ct | ctinfo;
	entry->ct_metadata.orig_dir = dir == IP_CT_DIR_ORIGINAL;

	act_ct_labels = entry->ct_metadata.labels;
	ct_labels = nf_ct_labels_find(ct);
	if (ct_labels)
		memcpy(act_ct_labels, ct_labels->bits, NF_CT_LABELS_MAX_SIZE);
	else
		memset(act_ct_labels, 0, NF_CT_LABELS_MAX_SIZE);
}
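
/* Note on the cookie above: ctinfo is packed into the low bits of the
 * nf_conn pointer, the same encoding nf_ct_set() uses for skb->_nfct, so
 * offload consumers can recover both the connection and its direction.
 */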

static int tcf_ct_flow_table_add_action_nat(struct net *net,
					    struct nf_conn *ct,
					    enum ip_conntrack_dir dir,
					    struct flow_action *action)
{
	const struct nf_conntrack_tuple *tuple = &ct->tuplehash[dir].tuple;
	struct nf_conntrack_tuple target;

	if (!(ct->status & IPS_NAT_MASK))
		return 0;

	nf_ct_invert_tuple(&target, &ct->tuplehash[!dir].tuple);

	switch (tuple->src.l3num) {
	case NFPROTO_IPV4:
		tcf_ct_flow_table_add_action_nat_ipv4(tuple, target,
						      action);
		break;
	case NFPROTO_IPV6:
		tcf_ct_flow_table_add_action_nat_ipv6(tuple, target,
						      action);
		break;
	default:
		return -EOPNOTSUPP;
	}

	switch (nf_ct_protonum(ct)) {
	case IPPROTO_TCP:
		tcf_ct_flow_table_add_action_nat_tcp(tuple, target, action);
		break;
	case IPPROTO_UDP:
		tcf_ct_flow_table_add_action_nat_udp(tuple, target, action);
		break;
	default:
		return -EOPNOTSUPP;
	}

	return 0;
}

static int tcf_ct_flow_table_fill_actions(struct net *net,
					  const struct flow_offload *flow,
					  enum flow_offload_tuple_dir tdir,
					  struct nf_flow_rule *flow_rule)
{
	struct flow_action *action = &flow_rule->rule->action;
	int num_entries = action->num_entries;
	struct nf_conn *ct = flow->ct;
	enum ip_conntrack_dir dir;
	int i, err;

	switch (tdir) {
	case FLOW_OFFLOAD_DIR_ORIGINAL:
		dir = IP_CT_DIR_ORIGINAL;
		break;
	case FLOW_OFFLOAD_DIR_REPLY:
		dir = IP_CT_DIR_REPLY;
		break;
	default:
		return -EOPNOTSUPP;
	}

	err = tcf_ct_flow_table_add_action_nat(net, ct, dir, action);
	if (err)
		goto err_nat;

	tcf_ct_flow_table_add_action_meta(ct, dir, action);
	return 0;

err_nat:
	/* Clear filled actions */
	for (i = num_entries; i < action->num_entries; i++)
		memset(&action->entries[i], 0, sizeof(action->entries[i]));
	action->num_entries = num_entries;

	return err;
}

static struct nf_flowtable_type flowtable_ct = {
	.action		= tcf_ct_flow_table_fill_actions,
	.owner		= THIS_MODULE,
};

static int tcf_ct_flow_table_get(struct net *net, struct tcf_ct_params *params)
{
	struct tcf_ct_flow_table *ct_ft;
	int err = -ENOMEM;

	mutex_lock(&zones_mutex);
	ct_ft = rhashtable_lookup_fast(&zones_ht, &params->zone, zones_params);
	if (ct_ft && refcount_inc_not_zero(&ct_ft->ref))
		goto out_unlock;

	ct_ft = kzalloc(sizeof(*ct_ft), GFP_KERNEL);
	if (!ct_ft)
		goto err_alloc;
	refcount_set(&ct_ft->ref, 1);

	ct_ft->zone = params->zone;
	err = rhashtable_insert_fast(&zones_ht, &ct_ft->node, zones_params);
	if (err)
		goto err_insert;

	ct_ft->nf_ft.type = &flowtable_ct;
	ct_ft->nf_ft.flags |= NF_FLOWTABLE_HW_OFFLOAD |
			      NF_FLOWTABLE_COUNTER;
	err = nf_flow_table_init(&ct_ft->nf_ft);
	if (err)
		goto err_init;
	write_pnet(&ct_ft->nf_ft.net, net);

	__module_get(THIS_MODULE);
out_unlock:
	params->ct_ft = ct_ft;
	params->nf_ft = &ct_ft->nf_ft;
	mutex_unlock(&zones_mutex);

	return 0;

err_init:
	rhashtable_remove_fast(&zones_ht, &ct_ft->node, zones_params);
err_insert:
	kfree(ct_ft);
err_alloc:
	mutex_unlock(&zones_mutex);
	return err;
}
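
/* Lifecycle sketch: tcf_ct_flow_table_get() either takes a reference on an
 * existing zone table or allocates one with ref == 1; the matching
 * tcf_ct_flow_table_put() below drops the reference and, on the last put,
 * unhashes the table and frees it from a workqueue after an RCU grace period.
 */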

static void tcf_ct_flow_table_cleanup_work(struct work_struct *work)
{
	struct flow_block_cb *block_cb, *tmp_cb;
	struct tcf_ct_flow_table *ct_ft;
	struct flow_block *block;

	ct_ft = container_of(to_rcu_work(work), struct tcf_ct_flow_table,
			     rwork);
	nf_flow_table_free(&ct_ft->nf_ft);

	/* Remove any remaining callbacks before cleanup */
	block = &ct_ft->nf_ft.flow_block;
	down_write(&ct_ft->nf_ft.flow_block_lock);
	list_for_each_entry_safe(block_cb, tmp_cb, &block->cb_list, list) {
		list_del(&block_cb->list);
		flow_block_cb_free(block_cb);
	}
	up_write(&ct_ft->nf_ft.flow_block_lock);
	kfree(ct_ft);

	module_put(THIS_MODULE);
}

static void tcf_ct_flow_table_put(struct tcf_ct_flow_table *ct_ft)
{
	if (refcount_dec_and_test(&ct_ft->ref)) {
		rhashtable_remove_fast(&zones_ht, &ct_ft->node, zones_params);
		INIT_RCU_WORK(&ct_ft->rwork, tcf_ct_flow_table_cleanup_work);
		queue_rcu_work(act_ct_wq, &ct_ft->rwork);
	}
}

static void tcf_ct_flow_tc_ifidx(struct flow_offload *entry,
				 struct nf_conn_act_ct_ext *act_ct_ext, u8 dir)
{
	entry->tuplehash[dir].tuple.xmit_type = FLOW_OFFLOAD_XMIT_TC;
	entry->tuplehash[dir].tuple.tc.iifidx = act_ct_ext->ifindex[dir];
}

static void tcf_ct_flow_table_add(struct tcf_ct_flow_table *ct_ft,
				  struct nf_conn *ct,
				  bool tcp)
{
	struct nf_conn_act_ct_ext *act_ct_ext;
	struct flow_offload *entry;
	int err;

	if (test_and_set_bit(IPS_OFFLOAD_BIT, &ct->status))
		return;

	entry = flow_offload_alloc(ct);
	if (!entry) {
		WARN_ON_ONCE(1);
		goto err_alloc;
	}

	if (tcp) {
		ct->proto.tcp.seen[0].flags |= IP_CT_TCP_FLAG_BE_LIBERAL;
		ct->proto.tcp.seen[1].flags |= IP_CT_TCP_FLAG_BE_LIBERAL;
	}

	act_ct_ext = nf_conn_act_ct_ext_find(ct);
	if (act_ct_ext) {
		tcf_ct_flow_tc_ifidx(entry, act_ct_ext, FLOW_OFFLOAD_DIR_ORIGINAL);
		tcf_ct_flow_tc_ifidx(entry, act_ct_ext, FLOW_OFFLOAD_DIR_REPLY);
	}

	err = flow_offload_add(&ct_ft->nf_ft, entry);
	if (err)
		goto err_add;

	return;

err_add:
	flow_offload_free(entry);
err_alloc:
	clear_bit(IPS_OFFLOAD_BIT, &ct->status);
}

static void tcf_ct_flow_table_process_conn(struct tcf_ct_flow_table *ct_ft,
					   struct nf_conn *ct,
					   enum ip_conntrack_info ctinfo)
{
	bool tcp = false;

	if ((ctinfo != IP_CT_ESTABLISHED && ctinfo != IP_CT_ESTABLISHED_REPLY) ||
	    !test_bit(IPS_ASSURED_BIT, &ct->status))
		return;

	switch (nf_ct_protonum(ct)) {
	case IPPROTO_TCP:
		tcp = true;
		if (ct->proto.tcp.state != TCP_CONNTRACK_ESTABLISHED)
			return;
		break;
	case IPPROTO_UDP:
		break;
#ifdef CONFIG_NF_CT_PROTO_GRE
	case IPPROTO_GRE: {
		struct nf_conntrack_tuple *tuple;

		if (ct->status & IPS_NAT_MASK)
			return;
		tuple = &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple;
		/* No support for GRE v1 */
		if (tuple->src.u.gre.key || tuple->dst.u.gre.key)
			return;
		break;
	}
#endif
	default:
		return;
	}

	if (nf_ct_ext_exist(ct, NF_CT_EXT_HELPER) ||
	    ct->status & IPS_SEQ_ADJUST)
		return;

	tcf_ct_flow_table_add(ct_ft, ct, tcp);
}
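
/* To summarize the checks above: only established, assured connections are
 * offloaded - TCP once it reaches the ESTABLISHED conntrack state, UDP
 * unconditionally, and (if built in) keyless GREv0 without NAT - and never
 * connections using a helper or TCP sequence adjustment.
 */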

static bool
tcf_ct_flow_table_fill_tuple_ipv4(struct sk_buff *skb,
				  struct flow_offload_tuple *tuple,
				  struct tcphdr **tcph)
{
	struct flow_ports *ports;
	unsigned int thoff;
	struct iphdr *iph;
	size_t hdrsize;
	u8 ipproto;

	if (!pskb_network_may_pull(skb, sizeof(*iph)))
		return false;

	iph = ip_hdr(skb);
	thoff = iph->ihl * 4;

	if (ip_is_fragment(iph) ||
	    unlikely(thoff != sizeof(struct iphdr)))
		return false;

	ipproto = iph->protocol;
	switch (ipproto) {
	case IPPROTO_TCP:
		hdrsize = sizeof(struct tcphdr);
		break;
	case IPPROTO_UDP:
		hdrsize = sizeof(*ports);
		break;
#ifdef CONFIG_NF_CT_PROTO_GRE
	case IPPROTO_GRE:
		hdrsize = sizeof(struct gre_base_hdr);
		break;
#endif
	default:
		return false;
	}

	if (iph->ttl <= 1)
		return false;

	if (!pskb_network_may_pull(skb, thoff + hdrsize))
		return false;

	switch (ipproto) {
	case IPPROTO_TCP:
		*tcph = (void *)(skb_network_header(skb) + thoff);
		fallthrough;
	case IPPROTO_UDP:
		ports = (struct flow_ports *)(skb_network_header(skb) + thoff);
		tuple->src_port = ports->source;
		tuple->dst_port = ports->dest;
		break;
	case IPPROTO_GRE: {
		struct gre_base_hdr *greh;

		greh = (struct gre_base_hdr *)(skb_network_header(skb) + thoff);
		if ((greh->flags & GRE_VERSION) != GRE_VERSION_0)
			return false;
		break;
	}
	}

	iph = ip_hdr(skb);

	tuple->src_v4.s_addr = iph->saddr;
	tuple->dst_v4.s_addr = iph->daddr;
	tuple->l3proto = AF_INET;
	tuple->l4proto = ipproto;

	return true;
}

static bool
tcf_ct_flow_table_fill_tuple_ipv6(struct sk_buff *skb,
				  struct flow_offload_tuple *tuple,
				  struct tcphdr **tcph)
{
	struct flow_ports *ports;
	struct ipv6hdr *ip6h;
	unsigned int thoff;
	size_t hdrsize;
	u8 nexthdr;

	if (!pskb_network_may_pull(skb, sizeof(*ip6h)))
		return false;

	ip6h = ipv6_hdr(skb);
	thoff = sizeof(*ip6h);

	nexthdr = ip6h->nexthdr;
	switch (nexthdr) {
	case IPPROTO_TCP:
		hdrsize = sizeof(struct tcphdr);
		break;
	case IPPROTO_UDP:
		hdrsize = sizeof(*ports);
		break;
#ifdef CONFIG_NF_CT_PROTO_GRE
	case IPPROTO_GRE:
		hdrsize = sizeof(struct gre_base_hdr);
		break;
#endif
	default:
		return false;
	}

	if (ip6h->hop_limit <= 1)
		return false;

	if (!pskb_network_may_pull(skb, thoff + hdrsize))
		return false;

	switch (nexthdr) {
	case IPPROTO_TCP:
		*tcph = (void *)(skb_network_header(skb) + thoff);
		fallthrough;
	case IPPROTO_UDP:
		ports = (struct flow_ports *)(skb_network_header(skb) + thoff);
		tuple->src_port = ports->source;
		tuple->dst_port = ports->dest;
		break;
	case IPPROTO_GRE: {
		struct gre_base_hdr *greh;

		greh = (struct gre_base_hdr *)(skb_network_header(skb) + thoff);
		if ((greh->flags & GRE_VERSION) != GRE_VERSION_0)
			return false;
		break;
	}
	}

	ip6h = ipv6_hdr(skb);

	tuple->src_v6 = ip6h->saddr;
	tuple->dst_v6 = ip6h->daddr;
	tuple->l3proto = AF_INET6;
	tuple->l4proto = nexthdr;

	return true;
}

static bool tcf_ct_flow_table_lookup(struct tcf_ct_params *p,
				     struct sk_buff *skb,
				     u8 family)
{
	struct nf_flowtable *nf_ft = &p->ct_ft->nf_ft;
	struct flow_offload_tuple_rhash *tuplehash;
	struct flow_offload_tuple tuple = {};
	enum ip_conntrack_info ctinfo;
	struct tcphdr *tcph = NULL;
	struct flow_offload *flow;
	struct nf_conn *ct;
	u8 dir;

	switch (family) {
	case NFPROTO_IPV4:
		if (!tcf_ct_flow_table_fill_tuple_ipv4(skb, &tuple, &tcph))
			return false;
		break;
	case NFPROTO_IPV6:
		if (!tcf_ct_flow_table_fill_tuple_ipv6(skb, &tuple, &tcph))
			return false;
		break;
	default:
		return false;
	}

	tuplehash = flow_offload_lookup(nf_ft, &tuple);
	if (!tuplehash)
		return false;

	dir = tuplehash->tuple.dir;
	flow = container_of(tuplehash, struct flow_offload, tuplehash[dir]);
	ct = flow->ct;

	if (tcph && (unlikely(tcph->fin || tcph->rst))) {
		flow_offload_teardown(flow);
		return false;
	}

	ctinfo = dir == FLOW_OFFLOAD_DIR_ORIGINAL ? IP_CT_ESTABLISHED :
						    IP_CT_ESTABLISHED_REPLY;

	flow_offload_refresh(nf_ft, flow);
	nf_conntrack_get(&ct->ct_general);
	nf_ct_set(skb, ct, ctinfo);
	if (nf_ft->flags & NF_FLOWTABLE_COUNTER)
		nf_ct_acct_update(ct, dir, skb->len);

	return true;
}

static int tcf_ct_flow_tables_init(void)
{
	return rhashtable_init(&zones_ht, &zones_params);
}

static void tcf_ct_flow_tables_uninit(void)
{
	rhashtable_destroy(&zones_ht);
}

static struct tc_action_ops act_ct_ops;

struct tc_ct_action_net {
	struct tc_action_net tn; /* Must be first */
	bool labels;
};

/* Determine whether skb->_nfct is equal to the result of conntrack lookup. */
static bool tcf_ct_skb_nfct_cached(struct net *net, struct sk_buff *skb,
				   struct tcf_ct_params *p)
{
	enum ip_conntrack_info ctinfo;
	struct nf_conn *ct;

	ct = nf_ct_get(skb, &ctinfo);
	if (!ct)
		return false;
	if (!net_eq(net, read_pnet(&ct->ct_net)))
		goto drop_ct;
	if (nf_ct_zone(ct)->id != p->zone)
		goto drop_ct;
	if (p->helper) {
		struct nf_conn_help *help;

		help = nf_ct_ext_find(ct, NF_CT_EXT_HELPER);
		if (help && rcu_access_pointer(help->helper) != p->helper)
			goto drop_ct;
	}

	/* Force conntrack entry direction. */
	if ((p->ct_action & TCA_CT_ACT_FORCE) &&
	    CTINFO2DIR(ctinfo) != IP_CT_DIR_ORIGINAL) {
		if (nf_ct_is_confirmed(ct))
			nf_ct_kill(ct);

		goto drop_ct;
	}

	return true;

drop_ct:
	nf_ct_put(ct);
	nf_ct_set(skb, NULL, IP_CT_UNTRACKED);

	return false;
}

/* Trim the skb to the length specified by the IP/IPv6 header,
 * removing any trailing lower-layer padding. This prepares the skb
 * for higher-layer processing that assumes skb->len excludes padding
 * (such as nf_ip_checksum). The caller needs to pull the skb to the
 * network header, and ensure ip_hdr/ipv6_hdr points to valid data.
 */
static int tcf_ct_skb_network_trim(struct sk_buff *skb, int family)
{
	unsigned int len;

	switch (family) {
	case NFPROTO_IPV4:
		len = ntohs(ip_hdr(skb)->tot_len);
		break;
	case NFPROTO_IPV6:
		len = sizeof(struct ipv6hdr)
			+ ntohs(ipv6_hdr(skb)->payload_len);
		break;
	default:
		len = skb->len;
	}

	return pskb_trim_rcsum(skb, len);
}
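
/* Worked example (illustrative): a minimum-size Ethernet frame carries a
 * 46-byte payload, so a 40-byte IPv4 TCP ACK (tot_len == 40) arrives with
 * 6 bytes of link-layer padding; pskb_trim_rcsum() above cuts skb->len back
 * to 40 so checksum helpers such as nf_ip_checksum() see only real data.
 */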

static u8 tcf_ct_skb_nf_family(struct sk_buff *skb)
{
	u8 family = NFPROTO_UNSPEC;

	switch (skb_protocol(skb, true)) {
	case htons(ETH_P_IP):
		family = NFPROTO_IPV4;
		break;
	case htons(ETH_P_IPV6):
		family = NFPROTO_IPV6;
		break;
	default:
		break;
	}

	return family;
}

static int tcf_ct_ipv4_is_fragment(struct sk_buff *skb, bool *frag)
{
	unsigned int len;

	len = skb_network_offset(skb) + sizeof(struct iphdr);
	if (unlikely(skb->len < len))
		return -EINVAL;
	if (unlikely(!pskb_may_pull(skb, len)))
		return -ENOMEM;

	*frag = ip_is_fragment(ip_hdr(skb));
	return 0;
}

static int tcf_ct_ipv6_is_fragment(struct sk_buff *skb, bool *frag)
{
	unsigned int flags = 0, len, payload_ofs = 0;
	unsigned short frag_off;
	int nexthdr;

	len = skb_network_offset(skb) + sizeof(struct ipv6hdr);
	if (unlikely(skb->len < len))
		return -EINVAL;
	if (unlikely(!pskb_may_pull(skb, len)))
		return -ENOMEM;

	nexthdr = ipv6_find_hdr(skb, &payload_ofs, -1, &frag_off, &flags);
	if (unlikely(nexthdr < 0))
		return -EPROTO;

	*frag = flags & IP6_FH_F_FRAG;
	return 0;
}

static int tcf_ct_handle_fragments(struct net *net, struct sk_buff *skb,
				   u8 family, u16 zone, bool *defrag)
{
	enum ip_conntrack_info ctinfo;
	struct nf_conn *ct;
	int err = 0;
	bool frag;
	u16 mru;

	/* Previously seen (loopback)? Ignore. */
	ct = nf_ct_get(skb, &ctinfo);
	if ((ct && !nf_ct_is_template(ct)) || ctinfo == IP_CT_UNTRACKED)
		return 0;

	if (family == NFPROTO_IPV4)
		err = tcf_ct_ipv4_is_fragment(skb, &frag);
	else
		err = tcf_ct_ipv6_is_fragment(skb, &frag);
	if (err || !frag)
		return err;

	skb_get(skb);
	mru = tc_skb_cb(skb)->mru;

	if (family == NFPROTO_IPV4) {
		enum ip_defrag_users user = IP_DEFRAG_CONNTRACK_IN + zone;

		memset(IPCB(skb), 0, sizeof(struct inet_skb_parm));
		local_bh_disable();
		err = ip_defrag(net, skb, user);
		local_bh_enable();
		if (err && err != -EINPROGRESS)
			return err;

		if (!err) {
			*defrag = true;
			mru = IPCB(skb)->frag_max_size;
		}
	} else { /* NFPROTO_IPV6 */
#if IS_ENABLED(CONFIG_NF_DEFRAG_IPV6)
		enum ip6_defrag_users user = IP6_DEFRAG_CONNTRACK_IN + zone;

		memset(IP6CB(skb), 0, sizeof(struct inet6_skb_parm));
		err = nf_ct_frag6_gather(net, skb, user);
		if (err && err != -EINPROGRESS)
			goto out_free;

		if (!err) {
			*defrag = true;
			mru = IP6CB(skb)->frag_max_size;
		}
#else
		err = -EOPNOTSUPP;
		goto out_free;
#endif
	}

	if (err != -EINPROGRESS)
		tc_skb_cb(skb)->mru = mru;
	skb_clear_hash(skb);
	skb->ignore_df = 1;
	return err;

out_free:
	kfree_skb(skb);
	return err;
}

static void tcf_ct_params_free(struct tcf_ct_params *params)
{
	if (params->helper) {
#if IS_ENABLED(CONFIG_NF_NAT)
		if (params->ct_action & TCA_CT_ACT_NAT)
			nf_nat_helper_put(params->helper);
#endif
		nf_conntrack_helper_put(params->helper);
	}
	if (params->ct_ft)
		tcf_ct_flow_table_put(params->ct_ft);
	if (params->tmpl)
		nf_ct_put(params->tmpl);
	kfree(params);
}

static void tcf_ct_params_free_rcu(struct rcu_head *head)
{
	struct tcf_ct_params *params;

	params = container_of(head, struct tcf_ct_params, rcu);
	tcf_ct_params_free(params);
}

static void tcf_ct_act_set_mark(struct nf_conn *ct, u32 mark, u32 mask)
{
#if IS_ENABLED(CONFIG_NF_CONNTRACK_MARK)
	u32 new_mark;

	if (!mask)
		return;

	new_mark = mark | (READ_ONCE(ct->mark) & ~(mask));
	if (READ_ONCE(ct->mark) != new_mark) {
		WRITE_ONCE(ct->mark, new_mark);
		if (nf_ct_is_confirmed(ct))
			nf_conntrack_event_cache(IPCT_MARK, ct);
	}
#endif
}

static void tcf_ct_act_set_labels(struct nf_conn *ct,
				  u32 *labels,
				  u32 *labels_m)
{
#if IS_ENABLED(CONFIG_NF_CONNTRACK_LABELS)
	size_t labels_sz = sizeof_field(struct tcf_ct_params, labels);

	if (!memchr_inv(labels_m, 0, labels_sz))
		return;

	nf_connlabels_replace(ct, labels, labels_m, 4);
#endif
}

static int tcf_ct_act_nat(struct sk_buff *skb,
			  struct nf_conn *ct,
			  enum ip_conntrack_info ctinfo,
			  int ct_action,
			  struct nf_nat_range2 *range,
			  bool commit)
{
#if IS_ENABLED(CONFIG_NF_NAT)
	int err, action = 0;

	if (!(ct_action & TCA_CT_ACT_NAT))
		return NF_ACCEPT;
	if (ct_action & TCA_CT_ACT_NAT_SRC)
		action |= BIT(NF_NAT_MANIP_SRC);
	if (ct_action & TCA_CT_ACT_NAT_DST)
		action |= BIT(NF_NAT_MANIP_DST);

	err = nf_ct_nat(skb, ct, ctinfo, &action, range, commit);

	if (action & BIT(NF_NAT_MANIP_SRC))
		tc_skb_cb(skb)->post_ct_snat = 1;
	if (action & BIT(NF_NAT_MANIP_DST))
		tc_skb_cb(skb)->post_ct_dnat = 1;

	return err;
#else
	return NF_ACCEPT;
#endif
}

TC_INDIRECT_SCOPE int tcf_ct_act(struct sk_buff *skb, const struct tc_action *a,
				 struct tcf_result *res)
{
	struct net *net = dev_net(skb->dev);
	enum ip_conntrack_info ctinfo;
	struct tcf_ct *c = to_ct(a);
	struct nf_conn *tmpl = NULL;
	struct nf_hook_state state;
	bool cached, commit, clear;
	int nh_ofs, err, retval;
	struct tcf_ct_params *p;
	bool add_helper = false;
	bool skip_add = false;
	bool defrag = false;
	struct nf_conn *ct;
	u8 family;

	p = rcu_dereference_bh(c->params);

	retval = READ_ONCE(c->tcf_action);
	commit = p->ct_action & TCA_CT_ACT_COMMIT;
	clear = p->ct_action & TCA_CT_ACT_CLEAR;
	tmpl = p->tmpl;

	tcf_lastuse_update(&c->tcf_tm);
	tcf_action_update_bstats(&c->common, skb);

	if (clear) {
		tc_skb_cb(skb)->post_ct = false;
		ct = nf_ct_get(skb, &ctinfo);
		if (ct) {
			nf_ct_put(ct);
			nf_ct_set(skb, NULL, IP_CT_UNTRACKED);
		}

		goto out_clear;
	}

	family = tcf_ct_skb_nf_family(skb);
	if (family == NFPROTO_UNSPEC)
		goto drop;

	/* The conntrack module expects to be working at L3.
	 * We also try to pull the IPv4/6 header to linear area
	 */
	nh_ofs = skb_network_offset(skb);
	skb_pull_rcsum(skb, nh_ofs);
	err = tcf_ct_handle_fragments(net, skb, family, p->zone, &defrag);
	if (err == -EINPROGRESS) {
		retval = TC_ACT_STOLEN;
		goto out_clear;
	}
	if (err)
		goto drop;

	err = tcf_ct_skb_network_trim(skb, family);
	if (err)
		goto drop;

	/* If we are recirculating packets to match on ct fields and
	 * committing with a separate ct action, then we don't need to
	 * actually run the packet through conntrack twice unless it's for a
	 * different zone.
	 */
	cached = tcf_ct_skb_nfct_cached(net, skb, p);
	if (!cached) {
		if (tcf_ct_flow_table_lookup(p, skb, family)) {
			skip_add = true;
			goto do_nat;
		}

		/* Associate skb with specified zone. */
		if (tmpl) {
			nf_conntrack_put(skb_nfct(skb));
			nf_conntrack_get(&tmpl->ct_general);
			nf_ct_set(skb, tmpl, IP_CT_NEW);
		}

		state.hook = NF_INET_PRE_ROUTING;
		state.net = net;
		state.pf = family;
		err = nf_conntrack_in(skb, &state);
		if (err != NF_ACCEPT)
			goto out_push;
	}

do_nat:
	ct = nf_ct_get(skb, &ctinfo);
	if (!ct)
		goto out_push;
	nf_ct_deliver_cached_events(ct);
	nf_conn_act_ct_ext_fill(skb, ct, ctinfo);

	err = tcf_ct_act_nat(skb, ct, ctinfo, p->ct_action, &p->range, commit);
	if (err != NF_ACCEPT)
		goto drop;

	if (!nf_ct_is_confirmed(ct) && commit && p->helper && !nfct_help(ct)) {
		err = __nf_ct_try_assign_helper(ct, p->tmpl, GFP_ATOMIC);
		if (err)
			goto drop;
		add_helper = true;
		if (p->ct_action & TCA_CT_ACT_NAT && !nfct_seqadj(ct)) {
			if (!nfct_seqadj_ext_add(ct))
				goto drop;
		}
	}

	if (nf_ct_is_confirmed(ct) ? ((!cached && !skip_add) || add_helper) : commit) {
		if (nf_ct_helper(skb, ct, ctinfo, family) != NF_ACCEPT)
			goto drop;
	}

	if (commit) {
		tcf_ct_act_set_mark(ct, p->mark, p->mark_mask);
		tcf_ct_act_set_labels(ct, p->labels, p->labels_mask);

		if (!nf_ct_is_confirmed(ct))
			nf_conn_act_ct_ext_add(ct);

		/* This will take care of sending queued events
		 * even if the connection is already confirmed.
		 */
		if (nf_conntrack_confirm(skb) != NF_ACCEPT)
			goto drop;
	}

	if (!skip_add)
		tcf_ct_flow_table_process_conn(p->ct_ft, ct, ctinfo);

out_push:
	skb_push_rcsum(skb, nh_ofs);

	tc_skb_cb(skb)->post_ct = true;
	tc_skb_cb(skb)->zone = p->zone;
out_clear:
	if (defrag)
		qdisc_skb_cb(skb)->pkt_len = skb->len;
	return retval;

drop:
	tcf_action_inc_drop_qstats(&c->common);
	return TC_ACT_SHOT;
}

static const struct nla_policy ct_policy[TCA_CT_MAX + 1] = {
	[TCA_CT_ACTION] = { .type = NLA_U16 },
	[TCA_CT_PARMS] = NLA_POLICY_EXACT_LEN(sizeof(struct tc_ct)),
	[TCA_CT_ZONE] = { .type = NLA_U16 },
	[TCA_CT_MARK] = { .type = NLA_U32 },
	[TCA_CT_MARK_MASK] = { .type = NLA_U32 },
	[TCA_CT_LABELS] = { .type = NLA_BINARY,
			    .len = 128 / BITS_PER_BYTE },
	[TCA_CT_LABELS_MASK] = { .type = NLA_BINARY,
				 .len = 128 / BITS_PER_BYTE },
	[TCA_CT_NAT_IPV4_MIN] = { .type = NLA_U32 },
	[TCA_CT_NAT_IPV4_MAX] = { .type = NLA_U32 },
	[TCA_CT_NAT_IPV6_MIN] = NLA_POLICY_EXACT_LEN(sizeof(struct in6_addr)),
	[TCA_CT_NAT_IPV6_MAX] = NLA_POLICY_EXACT_LEN(sizeof(struct in6_addr)),
	[TCA_CT_NAT_PORT_MIN] = { .type = NLA_U16 },
	[TCA_CT_NAT_PORT_MAX] = { .type = NLA_U16 },
	[TCA_CT_HELPER_NAME] = { .type = NLA_STRING, .len = NF_CT_HELPER_NAME_LEN },
	[TCA_CT_HELPER_FAMILY] = { .type = NLA_U8 },
	[TCA_CT_HELPER_PROTO] = { .type = NLA_U8 },
};
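
/* Illustrative userspace usage (a sketch assuming an iproute2 tc built with
 * ct support; device names and the exact rules are examples, not taken from
 * this file):
 *
 *   tc filter add dev eth0 ingress prio 1 chain 0 proto ip flower \
 *	ct_state -trk action ct zone 1 pipe action goto chain 1
 *   tc filter add dev eth0 ingress prio 1 chain 1 proto ip flower \
 *	ct_state +trk+new action ct commit zone 1 \
 *	action mirred egress redirect dev eth1
 *
 * The first rule sends untracked packets through conntrack (TCA_CT_ACTION
 * and TCA_CT_ZONE above); the second commits new connections in zone 1.
 */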

static int tcf_ct_fill_params_nat(struct tcf_ct_params *p,
				  struct tc_ct *parm,
				  struct nlattr **tb,
				  struct netlink_ext_ack *extack)
{
	struct nf_nat_range2 *range;

	if (!(p->ct_action & TCA_CT_ACT_NAT))
		return 0;

	if (!IS_ENABLED(CONFIG_NF_NAT)) {
		NL_SET_ERR_MSG_MOD(extack, "Netfilter nat isn't enabled in kernel");
		return -EOPNOTSUPP;
	}

	if (!(p->ct_action & (TCA_CT_ACT_NAT_SRC | TCA_CT_ACT_NAT_DST)))
		return 0;

	if ((p->ct_action & TCA_CT_ACT_NAT_SRC) &&
	    (p->ct_action & TCA_CT_ACT_NAT_DST)) {
		NL_SET_ERR_MSG_MOD(extack, "dnat and snat can't be enabled at the same time");
		return -EOPNOTSUPP;
	}

	range = &p->range;
	if (tb[TCA_CT_NAT_IPV4_MIN]) {
		struct nlattr *max_attr = tb[TCA_CT_NAT_IPV4_MAX];

		p->ipv4_range = true;
		range->flags |= NF_NAT_RANGE_MAP_IPS;
		range->min_addr.ip =
			nla_get_in_addr(tb[TCA_CT_NAT_IPV4_MIN]);

		range->max_addr.ip = max_attr ?
				     nla_get_in_addr(max_attr) :
				     range->min_addr.ip;
	} else if (tb[TCA_CT_NAT_IPV6_MIN]) {
		struct nlattr *max_attr = tb[TCA_CT_NAT_IPV6_MAX];

		p->ipv4_range = false;
		range->flags |= NF_NAT_RANGE_MAP_IPS;
		range->min_addr.in6 =
			nla_get_in6_addr(tb[TCA_CT_NAT_IPV6_MIN]);

		range->max_addr.in6 = max_attr ?
				      nla_get_in6_addr(max_attr) :
				      range->min_addr.in6;
	}

	if (tb[TCA_CT_NAT_PORT_MIN]) {
		range->flags |= NF_NAT_RANGE_PROTO_SPECIFIED;
		range->min_proto.all = nla_get_be16(tb[TCA_CT_NAT_PORT_MIN]);

		range->max_proto.all = tb[TCA_CT_NAT_PORT_MAX] ?
				       nla_get_be16(tb[TCA_CT_NAT_PORT_MAX]) :
				       range->min_proto.all;
	}

	return 0;
}

static void tcf_ct_set_key_val(struct nlattr **tb,
			       void *val, int val_type,
			       void *mask, int mask_type,
			       int len)
{
	if (!tb[val_type])
		return;
	nla_memcpy(val, tb[val_type], len);

	if (!mask)
		return;

	if (mask_type == TCA_CT_UNSPEC || !tb[mask_type])
		memset(mask, 0xff, len);
	else
		nla_memcpy(mask, tb[mask_type], len);
}
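
/* Note: when a mask attribute (e.g. TCA_CT_MARK_MASK) is omitted, the mask
 * defaults to all-ones above, i.e. the value is applied in full.
 */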

static int tcf_ct_fill_params(struct net *net,
			      struct tcf_ct_params *p,
			      struct tc_ct *parm,
			      struct nlattr **tb,
			      struct netlink_ext_ack *extack)
{
	struct tc_ct_action_net *tn = net_generic(net, act_ct_ops.net_id);
	struct nf_conntrack_zone zone;
	int err, family, proto, len;
	struct nf_conn *tmpl;
	char *name;

	p->zone = NF_CT_DEFAULT_ZONE_ID;

	tcf_ct_set_key_val(tb,
			   &p->ct_action, TCA_CT_ACTION,
			   NULL, TCA_CT_UNSPEC,
			   sizeof(p->ct_action));

	if (p->ct_action & TCA_CT_ACT_CLEAR)
		return 0;

	err = tcf_ct_fill_params_nat(p, parm, tb, extack);
	if (err)
		return err;

	if (tb[TCA_CT_MARK]) {
		if (!IS_ENABLED(CONFIG_NF_CONNTRACK_MARK)) {
			NL_SET_ERR_MSG_MOD(extack, "Conntrack mark isn't enabled.");
			return -EOPNOTSUPP;
		}
		tcf_ct_set_key_val(tb,
				   &p->mark, TCA_CT_MARK,
				   &p->mark_mask, TCA_CT_MARK_MASK,
				   sizeof(p->mark));
	}

	if (tb[TCA_CT_LABELS]) {
		if (!IS_ENABLED(CONFIG_NF_CONNTRACK_LABELS)) {
			NL_SET_ERR_MSG_MOD(extack, "Conntrack labels isn't enabled.");
			return -EOPNOTSUPP;
		}

		if (!tn->labels) {
			NL_SET_ERR_MSG_MOD(extack, "Failed to set connlabel length");
			return -EOPNOTSUPP;
		}
		tcf_ct_set_key_val(tb,
				   p->labels, TCA_CT_LABELS,
				   p->labels_mask, TCA_CT_LABELS_MASK,
				   sizeof(p->labels));
	}

	if (tb[TCA_CT_ZONE]) {
		if (!IS_ENABLED(CONFIG_NF_CONNTRACK_ZONES)) {
			NL_SET_ERR_MSG_MOD(extack, "Conntrack zones isn't enabled.");
			return -EOPNOTSUPP;
		}

		tcf_ct_set_key_val(tb,
				   &p->zone, TCA_CT_ZONE,
				   NULL, TCA_CT_UNSPEC,
				   sizeof(p->zone));
	}

	nf_ct_zone_init(&zone, p->zone, NF_CT_DEFAULT_ZONE_DIR, 0);
	tmpl = nf_ct_tmpl_alloc(net, &zone, GFP_KERNEL);
	if (!tmpl) {
		NL_SET_ERR_MSG_MOD(extack, "Failed to allocate conntrack template");
		return -ENOMEM;
	}
	p->tmpl = tmpl;
	if (tb[TCA_CT_HELPER_NAME]) {
		name = nla_data(tb[TCA_CT_HELPER_NAME]);
		len = nla_len(tb[TCA_CT_HELPER_NAME]);
		if (len > 16 || name[len - 1] != '\0') {
			NL_SET_ERR_MSG_MOD(extack, "Failed to parse helper name.");
			err = -EINVAL;
			goto err;
		}
		family = tb[TCA_CT_HELPER_FAMILY] ? nla_get_u8(tb[TCA_CT_HELPER_FAMILY]) : AF_INET;
		proto = tb[TCA_CT_HELPER_PROTO] ? nla_get_u8(tb[TCA_CT_HELPER_PROTO]) : IPPROTO_TCP;
		err = nf_ct_add_helper(tmpl, name, family, proto,
				       p->ct_action & TCA_CT_ACT_NAT, &p->helper);
		if (err) {
			NL_SET_ERR_MSG_MOD(extack, "Failed to add helper");
			goto err;
		}
	}

	__set_bit(IPS_CONFIRMED_BIT, &tmpl->status);
	return 0;
err:
	nf_ct_put(p->tmpl);
	p->tmpl = NULL;
	return err;
}

static int tcf_ct_init(struct net *net, struct nlattr *nla,
		       struct nlattr *est, struct tc_action **a,
		       struct tcf_proto *tp, u32 flags,
		       struct netlink_ext_ack *extack)
{
	struct tc_action_net *tn = net_generic(net, act_ct_ops.net_id);
	bool bind = flags & TCA_ACT_FLAGS_BIND;
	struct tcf_ct_params *params = NULL;
	struct nlattr *tb[TCA_CT_MAX + 1];
	struct tcf_chain *goto_ch = NULL;
	struct tc_ct *parm;
	struct tcf_ct *c;
	int err, res = 0;
	u32 index;

	if (!nla) {
		NL_SET_ERR_MSG_MOD(extack, "Ct requires attributes to be passed");
		return -EINVAL;
	}

	err = nla_parse_nested(tb, TCA_CT_MAX, nla, ct_policy, extack);
	if (err < 0)
		return err;

	if (!tb[TCA_CT_PARMS]) {
		NL_SET_ERR_MSG_MOD(extack, "Missing required ct parameters");
		return -EINVAL;
	}
	parm = nla_data(tb[TCA_CT_PARMS]);
	index = parm->index;
	err = tcf_idr_check_alloc(tn, &index, a, bind);
	if (err < 0)
		return err;

	if (!err) {
		err = tcf_idr_create_from_flags(tn, index, est, a,
						&act_ct_ops, bind, flags);
		if (err) {
			tcf_idr_cleanup(tn, index);
			return err;
		}
		res = ACT_P_CREATED;
	} else {
		if (bind)
			return 0;

		if (!(flags & TCA_ACT_FLAGS_REPLACE)) {
			tcf_idr_release(*a, bind);
			return -EEXIST;
		}
	}
	err = tcf_action_check_ctrlact(parm->action, tp, &goto_ch, extack);
	if (err < 0)
		goto cleanup;

	c = to_ct(*a);

	params = kzalloc(sizeof(*params), GFP_KERNEL);
	if (unlikely(!params)) {
		err = -ENOMEM;
		goto cleanup;
	}

	err = tcf_ct_fill_params(net, params, parm, tb, extack);
	if (err)
		goto cleanup;

	err = tcf_ct_flow_table_get(net, params);
	if (err)
		goto cleanup;

	spin_lock_bh(&c->tcf_lock);
	goto_ch = tcf_action_set_ctrlact(*a, parm->action, goto_ch);
	params = rcu_replace_pointer(c->params, params,
				     lockdep_is_held(&c->tcf_lock));
	spin_unlock_bh(&c->tcf_lock);

	if (goto_ch)
		tcf_chain_put_by_act(goto_ch);
	if (params)
		call_rcu(&params->rcu, tcf_ct_params_free_rcu);

	return res;

cleanup:
	if (goto_ch)
		tcf_chain_put_by_act(goto_ch);
	if (params)
		tcf_ct_params_free(params);
	tcf_idr_release(*a, bind);
	return err;
}

static void tcf_ct_cleanup(struct tc_action *a)
{
	struct tcf_ct_params *params;
	struct tcf_ct *c = to_ct(a);

	params = rcu_dereference_protected(c->params, 1);
	if (params)
		call_rcu(&params->rcu, tcf_ct_params_free_rcu);
}

static int tcf_ct_dump_key_val(struct sk_buff *skb,
			       void *val, int val_type,
			       void *mask, int mask_type,
			       int len)
{
	int err;

	if (mask && !memchr_inv(mask, 0, len))
		return 0;

	err = nla_put(skb, val_type, len, val);
	if (err)
		return err;

	if (mask_type != TCA_CT_UNSPEC) {
		err = nla_put(skb, mask_type, len, mask);
		if (err)
			return err;
	}

	return 0;
}

static int tcf_ct_dump_nat(struct sk_buff *skb, struct tcf_ct_params *p)
{
	struct nf_nat_range2 *range = &p->range;

	if (!(p->ct_action & TCA_CT_ACT_NAT))
		return 0;

	if (!(p->ct_action & (TCA_CT_ACT_NAT_SRC | TCA_CT_ACT_NAT_DST)))
		return 0;

	if (range->flags & NF_NAT_RANGE_MAP_IPS) {
		if (p->ipv4_range) {
			if (nla_put_in_addr(skb, TCA_CT_NAT_IPV4_MIN,
					    range->min_addr.ip))
				return -1;
			if (nla_put_in_addr(skb, TCA_CT_NAT_IPV4_MAX,
					    range->max_addr.ip))
				return -1;
		} else {
			if (nla_put_in6_addr(skb, TCA_CT_NAT_IPV6_MIN,
					     &range->min_addr.in6))
				return -1;
			if (nla_put_in6_addr(skb, TCA_CT_NAT_IPV6_MAX,
					     &range->max_addr.in6))
				return -1;
		}
	}

	if (range->flags & NF_NAT_RANGE_PROTO_SPECIFIED) {
		if (nla_put_be16(skb, TCA_CT_NAT_PORT_MIN,
				 range->min_proto.all))
			return -1;
		if (nla_put_be16(skb, TCA_CT_NAT_PORT_MAX,
				 range->max_proto.all))
			return -1;
	}

	return 0;
}

static int tcf_ct_dump_helper(struct sk_buff *skb, struct nf_conntrack_helper *helper)
{
	if (!helper)
		return 0;

	if (nla_put_string(skb, TCA_CT_HELPER_NAME, helper->name) ||
	    nla_put_u8(skb, TCA_CT_HELPER_FAMILY, helper->tuple.src.l3num) ||
	    nla_put_u8(skb, TCA_CT_HELPER_PROTO, helper->tuple.dst.protonum))
		return -1;

	return 0;
}

static inline int tcf_ct_dump(struct sk_buff *skb, struct tc_action *a,
			      int bind, int ref)
{
	unsigned char *b = skb_tail_pointer(skb);
	struct tcf_ct *c = to_ct(a);
	struct tcf_ct_params *p;

	struct tc_ct opt = {
		.index   = c->tcf_index,
		.refcnt  = refcount_read(&c->tcf_refcnt) - ref,
		.bindcnt = atomic_read(&c->tcf_bindcnt) - bind,
	};
	struct tcf_t t;

	spin_lock_bh(&c->tcf_lock);
	p = rcu_dereference_protected(c->params,
				      lockdep_is_held(&c->tcf_lock));
	opt.action = c->tcf_action;

	if (tcf_ct_dump_key_val(skb,
				&p->ct_action, TCA_CT_ACTION,
				NULL, TCA_CT_UNSPEC,
				sizeof(p->ct_action)))
		goto nla_put_failure;

	if (p->ct_action & TCA_CT_ACT_CLEAR)
		goto skip_dump;

	if (IS_ENABLED(CONFIG_NF_CONNTRACK_MARK) &&
	    tcf_ct_dump_key_val(skb,
				&p->mark, TCA_CT_MARK,
				&p->mark_mask, TCA_CT_MARK_MASK,
				sizeof(p->mark)))
		goto nla_put_failure;

	if (IS_ENABLED(CONFIG_NF_CONNTRACK_LABELS) &&
	    tcf_ct_dump_key_val(skb,
				p->labels, TCA_CT_LABELS,
				p->labels_mask, TCA_CT_LABELS_MASK,
				sizeof(p->labels)))
		goto nla_put_failure;

	if (IS_ENABLED(CONFIG_NF_CONNTRACK_ZONES) &&
	    tcf_ct_dump_key_val(skb,
				&p->zone, TCA_CT_ZONE,
				NULL, TCA_CT_UNSPEC,
				sizeof(p->zone)))
		goto nla_put_failure;

	if (tcf_ct_dump_nat(skb, p))
		goto nla_put_failure;

	if (tcf_ct_dump_helper(skb, p->helper))
		goto nla_put_failure;

skip_dump:
	if (nla_put(skb, TCA_CT_PARMS, sizeof(opt), &opt))
		goto nla_put_failure;

	tcf_tm_dump(&t, &c->tcf_tm);
	if (nla_put_64bit(skb, TCA_CT_TM, sizeof(t), &t, TCA_CT_PAD))
		goto nla_put_failure;
	spin_unlock_bh(&c->tcf_lock);

	return skb->len;
nla_put_failure:
	spin_unlock_bh(&c->tcf_lock);
	nlmsg_trim(skb, b);
	return -1;
}

static void tcf_stats_update(struct tc_action *a, u64 bytes, u64 packets,
			     u64 drops, u64 lastuse, bool hw)
{
	struct tcf_ct *c = to_ct(a);

	tcf_action_update_stats(a, bytes, packets, drops, hw);
	c->tcf_tm.lastuse = max_t(u64, c->tcf_tm.lastuse, lastuse);
}

static int tcf_ct_offload_act_setup(struct tc_action *act, void *entry_data,
				    u32 *index_inc, bool bind,
				    struct netlink_ext_ack *extack)
{
	if (bind) {
		struct flow_action_entry *entry = entry_data;

		entry->id = FLOW_ACTION_CT;
		entry->ct.action = tcf_ct_action(act);
		entry->ct.zone = tcf_ct_zone(act);
		entry->ct.flow_table = tcf_ct_ft(act);
		*index_inc = 1;
	} else {
		struct flow_offload_action *fl_action = entry_data;

		fl_action->id = FLOW_ACTION_CT;
	}

	return 0;
}

static struct tc_action_ops act_ct_ops = {
	.kind		= "ct",
	.id		= TCA_ID_CT,
	.owner		= THIS_MODULE,
	.act		= tcf_ct_act,
	.dump		= tcf_ct_dump,
	.init		= tcf_ct_init,
	.cleanup	= tcf_ct_cleanup,
	.stats_update	= tcf_stats_update,
	.offload_act_setup = tcf_ct_offload_act_setup,
	.size		= sizeof(struct tcf_ct),
};

static __net_init int ct_init_net(struct net *net)
{
	unsigned int n_bits = sizeof_field(struct tcf_ct_params, labels) * 8;
	struct tc_ct_action_net *tn = net_generic(net, act_ct_ops.net_id);

	if (nf_connlabels_get(net, n_bits - 1)) {
		tn->labels = false;
		pr_err("act_ct: Failed to set connlabels length");
	} else {
		tn->labels = true;
	}

	return tc_action_net_init(net, &tn->tn, &act_ct_ops);
}

static void __net_exit ct_exit_net(struct list_head *net_list)
{
	struct net *net;

	rtnl_lock();
	list_for_each_entry(net, net_list, exit_list) {
		struct tc_ct_action_net *tn = net_generic(net, act_ct_ops.net_id);

		if (tn->labels)
			nf_connlabels_put(net);
	}
	rtnl_unlock();

	tc_action_net_exit(net_list, act_ct_ops.net_id);
}

static struct pernet_operations ct_net_ops = {
	.init = ct_init_net,
	.exit_batch = ct_exit_net,
	.id   = &act_ct_ops.net_id,
	.size = sizeof(struct tc_ct_action_net),
};

static int __init ct_init_module(void)
{
	int err;

	act_ct_wq = alloc_ordered_workqueue("act_ct_workqueue", 0);
	if (!act_ct_wq)
		return -ENOMEM;

	err = tcf_ct_flow_tables_init();
	if (err)
		goto err_tbl_init;

	err = tcf_register_action(&act_ct_ops, &ct_net_ops);
	if (err)
		goto err_register;

	static_branch_inc(&tcf_frag_xmit_count);

	return 0;

err_register:
	tcf_ct_flow_tables_uninit();
err_tbl_init:
	destroy_workqueue(act_ct_wq);
	return err;
}

static void __exit ct_cleanup_module(void)
{
	static_branch_dec(&tcf_frag_xmit_count);
	tcf_unregister_action(&act_ct_ops, &ct_net_ops);
	tcf_ct_flow_tables_uninit();
	destroy_workqueue(act_ct_wq);
}

module_init(ct_init_module);
module_exit(ct_cleanup_module);
MODULE_AUTHOR("Paul Blakey <paulb@mellanox.com>");
MODULE_AUTHOR("Yossi Kuperman <yossiku@mellanox.com>");
MODULE_AUTHOR("Marcelo Ricardo Leitner <marcelo.leitner@gmail.com>");
MODULE_DESCRIPTION("Connection tracking action");
MODULE_LICENSE("GPL v2");