// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* -
 * net/sched/act_ct.c  Connection Tracking action
 *
 * Authors:   Paul Blakey <paulb@mellanox.com>
 *            Yossi Kuperman <yossiku@mellanox.com>
 *            Marcelo Ricardo Leitner <marcelo.leitner@gmail.com>
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/pkt_cls.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/rhashtable.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>
#include <net/act_api.h>
#include <net/ip.h>
#include <net/ipv6_frag.h>
#include <uapi/linux/tc_act/tc_ct.h>
#include <net/tc_act/tc_ct.h>

#include <net/netfilter/nf_flow_table.h>
#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_core.h>
#include <net/netfilter/nf_conntrack_zones.h>
#include <net/netfilter/nf_conntrack_helper.h>
#include <net/netfilter/nf_conntrack_acct.h>
#include <net/netfilter/ipv6/nf_defrag_ipv6.h>
#include <uapi/linux/netfilter/nf_nat.h>

static struct workqueue_struct *act_ct_wq;
static struct rhashtable zones_ht;
static DEFINE_MUTEX(zones_mutex);

struct tcf_ct_flow_table {
	struct rhash_head node; /* In zones tables */

	struct rcu_work rwork;
	struct nf_flowtable nf_ft;
	refcount_t ref;
	u16 zone;

	bool dying;
};

static const struct rhashtable_params zones_params = {
	.head_offset = offsetof(struct tcf_ct_flow_table, node),
	.key_offset = offsetof(struct tcf_ct_flow_table, zone),
	.key_len = sizeof_field(struct tcf_ct_flow_table, zone),
	.automatic_shrinking = true,
};

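/* Take the next free slot in the flow_action entry array. The array is
 * expected to have been sized by the flow table offload code before
 * ->action() is invoked, so no bounds check is done here.
 */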
static struct flow_action_entry *
tcf_ct_flow_table_flow_action_get_next(struct flow_action *flow_action)
{
	int i = flow_action->num_entries++;

	return &flow_action->entries[i];
}

static void tcf_ct_add_mangle_action(struct flow_action *action,
				     enum flow_action_mangle_base htype,
				     u32 offset,
				     u32 mask,
				     u32 val)
{
	struct flow_action_entry *entry;

	entry = tcf_ct_flow_table_flow_action_get_next(action);
	entry->id = FLOW_ACTION_MANGLE;
	entry->mangle.htype = htype;
	entry->mangle.mask = ~mask;
	entry->mangle.offset = offset;
	entry->mangle.val = val;
}

/* The following NAT helper functions check if the inverted reverse tuple
 * (target) is different than the current dir tuple - meaning NAT for ports
 * and/or IP is needed, and add the relevant mangle actions.
 */
static void
tcf_ct_flow_table_add_action_nat_ipv4(const struct nf_conntrack_tuple *tuple,
				      struct nf_conntrack_tuple target,
				      struct flow_action *action)
{
	if (memcmp(&target.src.u3, &tuple->src.u3, sizeof(target.src.u3)))
		tcf_ct_add_mangle_action(action, FLOW_ACT_MANGLE_HDR_TYPE_IP4,
					 offsetof(struct iphdr, saddr),
					 0xFFFFFFFF,
					 be32_to_cpu(target.src.u3.ip));
	if (memcmp(&target.dst.u3, &tuple->dst.u3, sizeof(target.dst.u3)))
		tcf_ct_add_mangle_action(action, FLOW_ACT_MANGLE_HDR_TYPE_IP4,
					 offsetof(struct iphdr, daddr),
					 0xFFFFFFFF,
					 be32_to_cpu(target.dst.u3.ip));
}

static void
tcf_ct_add_ipv6_addr_mangle_action(struct flow_action *action,
				   union nf_inet_addr *addr,
				   u32 offset)
{
	int i;

	for (i = 0; i < sizeof(struct in6_addr) / sizeof(u32); i++)
		tcf_ct_add_mangle_action(action, FLOW_ACT_MANGLE_HDR_TYPE_IP6,
					 i * sizeof(u32) + offset,
					 0xFFFFFFFF, be32_to_cpu(addr->ip6[i]));
}

static void
tcf_ct_flow_table_add_action_nat_ipv6(const struct nf_conntrack_tuple *tuple,
				      struct nf_conntrack_tuple target,
				      struct flow_action *action)
{
	if (memcmp(&target.src.u3, &tuple->src.u3, sizeof(target.src.u3)))
		tcf_ct_add_ipv6_addr_mangle_action(action, &target.src.u3,
						   offsetof(struct ipv6hdr,
							    saddr));
	if (memcmp(&target.dst.u3, &tuple->dst.u3, sizeof(target.dst.u3)))
		tcf_ct_add_ipv6_addr_mangle_action(action, &target.dst.u3,
						   offsetof(struct ipv6hdr,
							    daddr));
}

static void
tcf_ct_flow_table_add_action_nat_tcp(const struct nf_conntrack_tuple *tuple,
				     struct nf_conntrack_tuple target,
				     struct flow_action *action)
{
	__be16 target_src = target.src.u.tcp.port;
	__be16 target_dst = target.dst.u.tcp.port;

	if (target_src != tuple->src.u.tcp.port)
		tcf_ct_add_mangle_action(action, FLOW_ACT_MANGLE_HDR_TYPE_TCP,
					 offsetof(struct tcphdr, source),
					 0xFFFF, be16_to_cpu(target_src));
	if (target_dst != tuple->dst.u.tcp.port)
		tcf_ct_add_mangle_action(action, FLOW_ACT_MANGLE_HDR_TYPE_TCP,
					 offsetof(struct tcphdr, dest),
					 0xFFFF, be16_to_cpu(target_dst));
}

static void
tcf_ct_flow_table_add_action_nat_udp(const struct nf_conntrack_tuple *tuple,
				     struct nf_conntrack_tuple target,
				     struct flow_action *action)
{
	__be16 target_src = target.src.u.udp.port;
	__be16 target_dst = target.dst.u.udp.port;

	if (target_src != tuple->src.u.udp.port)
		tcf_ct_add_mangle_action(action, FLOW_ACT_MANGLE_HDR_TYPE_UDP,
					 offsetof(struct udphdr, source),
					 0xFFFF, be16_to_cpu(target_src));
	if (target_dst != tuple->dst.u.udp.port)
		tcf_ct_add_mangle_action(action, FLOW_ACT_MANGLE_HDR_TYPE_UDP,
					 offsetof(struct udphdr, dest),
					 0xFFFF, be16_to_cpu(target_dst));
}

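/* Fill a CT_METADATA entry so drivers can restore the conntrack state
 * (mark, labels and a ct/ctinfo cookie) on packets they report as
 * matching the offloaded flow.
 */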
static void tcf_ct_flow_table_add_action_meta(struct nf_conn *ct,
					      enum ip_conntrack_dir dir,
					      struct flow_action *action)
{
	struct nf_conn_labels *ct_labels;
	struct flow_action_entry *entry;
	enum ip_conntrack_info ctinfo;
	u32 *act_ct_labels;

	entry = tcf_ct_flow_table_flow_action_get_next(action);
	entry->id = FLOW_ACTION_CT_METADATA;
#if IS_ENABLED(CONFIG_NF_CONNTRACK_MARK)
	entry->ct_metadata.mark = ct->mark;
#endif
	ctinfo = dir == IP_CT_DIR_ORIGINAL ? IP_CT_ESTABLISHED :
					     IP_CT_ESTABLISHED_REPLY;
	/* aligns with the CT reference on the SKB nf_ct_set */
	entry->ct_metadata.cookie = (unsigned long)ct | ctinfo;

	act_ct_labels = entry->ct_metadata.labels;
	ct_labels = nf_ct_labels_find(ct);
	if (ct_labels)
		memcpy(act_ct_labels, ct_labels->bits, NF_CT_LABELS_MAX_SIZE);
	else
		memset(act_ct_labels, 0, NF_CT_LABELS_MAX_SIZE);
}

static int tcf_ct_flow_table_add_action_nat(struct net *net,
					    struct nf_conn *ct,
					    enum ip_conntrack_dir dir,
					    struct flow_action *action)
{
	const struct nf_conntrack_tuple *tuple = &ct->tuplehash[dir].tuple;
	struct nf_conntrack_tuple target;

	if (!(ct->status & IPS_NAT_MASK))
		return 0;

	nf_ct_invert_tuple(&target, &ct->tuplehash[!dir].tuple);

	switch (tuple->src.l3num) {
	case NFPROTO_IPV4:
		tcf_ct_flow_table_add_action_nat_ipv4(tuple, target,
						      action);
		break;
	case NFPROTO_IPV6:
		tcf_ct_flow_table_add_action_nat_ipv6(tuple, target,
						      action);
		break;
	default:
		return -EOPNOTSUPP;
	}

	switch (nf_ct_protonum(ct)) {
	case IPPROTO_TCP:
		tcf_ct_flow_table_add_action_nat_tcp(tuple, target, action);
		break;
	case IPPROTO_UDP:
		tcf_ct_flow_table_add_action_nat_udp(tuple, target, action);
		break;
	default:
		return -EOPNOTSUPP;
	}

	return 0;
}

static int tcf_ct_flow_table_fill_actions(struct net *net,
					  const struct flow_offload *flow,
					  enum flow_offload_tuple_dir tdir,
					  struct nf_flow_rule *flow_rule)
{
	struct flow_action *action = &flow_rule->rule->action;
	int num_entries = action->num_entries;
	struct nf_conn *ct = flow->ct;
	enum ip_conntrack_dir dir;
	int i, err;

	switch (tdir) {
	case FLOW_OFFLOAD_DIR_ORIGINAL:
		dir = IP_CT_DIR_ORIGINAL;
		break;
	case FLOW_OFFLOAD_DIR_REPLY:
		dir = IP_CT_DIR_REPLY;
		break;
	default:
		return -EOPNOTSUPP;
	}

	err = tcf_ct_flow_table_add_action_nat(net, ct, dir, action);
	if (err)
		goto err_nat;

	tcf_ct_flow_table_add_action_meta(ct, dir, action);
	return 0;

err_nat:
	/* Clear filled actions */
	for (i = num_entries; i < action->num_entries; i++)
		memset(&action->entries[i], 0, sizeof(action->entries[i]));
	action->num_entries = num_entries;

	return err;
}

static struct nf_flowtable_type flowtable_ct = {
	.action = tcf_ct_flow_table_fill_actions,
	.owner = THIS_MODULE,
};

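/* Get (or create) the flow table for the zone in params. Tables are
 * shared between all ct actions of the same zone, kept in zones_ht
 * under zones_mutex; each user holds a reference.
 */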
static int tcf_ct_flow_table_get(struct tcf_ct_params *params)
{
	struct tcf_ct_flow_table *ct_ft;
	int err = -ENOMEM;

	mutex_lock(&zones_mutex);
	ct_ft = rhashtable_lookup_fast(&zones_ht, &params->zone, zones_params);
	if (ct_ft && refcount_inc_not_zero(&ct_ft->ref))
		goto out_unlock;

	ct_ft = kzalloc(sizeof(*ct_ft), GFP_KERNEL);
	if (!ct_ft)
		goto err_alloc;
	refcount_set(&ct_ft->ref, 1);

	ct_ft->zone = params->zone;
	err = rhashtable_insert_fast(&zones_ht, &ct_ft->node, zones_params);
	if (err)
		goto err_insert;

	ct_ft->nf_ft.type = &flowtable_ct;
	ct_ft->nf_ft.flags |= NF_FLOWTABLE_HW_OFFLOAD;
	err = nf_flow_table_init(&ct_ft->nf_ft);
	if (err)
		goto err_init;

	__module_get(THIS_MODULE);
out_unlock:
	params->ct_ft = ct_ft;
	params->nf_ft = &ct_ft->nf_ft;
	mutex_unlock(&zones_mutex);

	return 0;

err_init:
	rhashtable_remove_fast(&zones_ht, &ct_ft->node, zones_params);
err_insert:
	kfree(ct_ft);
err_alloc:
	mutex_unlock(&zones_mutex);
	return err;
}

static void tcf_ct_flow_table_cleanup_work(struct work_struct *work)
{
	struct tcf_ct_flow_table *ct_ft;

	ct_ft = container_of(to_rcu_work(work), struct tcf_ct_flow_table,
			     rwork);
	nf_flow_table_free(&ct_ft->nf_ft);
	kfree(ct_ft);

	module_put(THIS_MODULE);
}

static void tcf_ct_flow_table_put(struct tcf_ct_params *params)
{
	struct tcf_ct_flow_table *ct_ft = params->ct_ft;

	if (refcount_dec_and_test(&params->ct_ft->ref)) {
		rhashtable_remove_fast(&zones_ht, &ct_ft->node, zones_params);
		INIT_RCU_WORK(&ct_ft->rwork, tcf_ct_flow_table_cleanup_work);
		queue_rcu_work(act_ct_wq, &ct_ft->rwork);
	}
}

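/* Offload an established connection into the zone's flow table.
 * IPS_OFFLOAD_BIT guards against adding the same conntrack entry
 * twice. For TCP, be liberal about window checking, since packets
 * taking the offloaded path won't update the tracked window state.
 */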
static void tcf_ct_flow_table_add(struct tcf_ct_flow_table *ct_ft,
				  struct nf_conn *ct,
				  bool tcp)
{
	struct flow_offload *entry;
	int err;

	if (test_and_set_bit(IPS_OFFLOAD_BIT, &ct->status))
		return;

	entry = flow_offload_alloc(ct);
	if (!entry) {
		WARN_ON_ONCE(1);
		goto err_alloc;
	}

	if (tcp) {
		ct->proto.tcp.seen[0].flags |= IP_CT_TCP_FLAG_BE_LIBERAL;
		ct->proto.tcp.seen[1].flags |= IP_CT_TCP_FLAG_BE_LIBERAL;
	}

	err = flow_offload_add(&ct_ft->nf_ft, entry);
	if (err)
		goto err_add;

	return;

err_add:
	flow_offload_free(entry);
err_alloc:
	clear_bit(IPS_OFFLOAD_BIT, &ct->status);
}

static void tcf_ct_flow_table_process_conn(struct tcf_ct_flow_table *ct_ft,
					   struct nf_conn *ct,
					   enum ip_conntrack_info ctinfo)
{
	bool tcp = false;

	if (ctinfo != IP_CT_ESTABLISHED && ctinfo != IP_CT_ESTABLISHED_REPLY)
		return;

	switch (nf_ct_protonum(ct)) {
	case IPPROTO_TCP:
		tcp = true;
		if (ct->proto.tcp.state != TCP_CONNTRACK_ESTABLISHED)
			return;
		break;
	case IPPROTO_UDP:
		break;
	default:
		return;
	}

	if (nf_ct_ext_exist(ct, NF_CT_EXT_HELPER) ||
	    ct->status & IPS_SEQ_ADJUST)
		return;

	tcf_ct_flow_table_add(ct_ft, ct, tcp);
}

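/* Build a flow table lookup tuple from a linear IPv4 packet.
 * Fragments, packets with IP options and packets about to exceed
 * their TTL are left to the slow (conntrack) path.
 */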
static bool
tcf_ct_flow_table_fill_tuple_ipv4(struct sk_buff *skb,
				  struct flow_offload_tuple *tuple,
				  struct tcphdr **tcph)
{
	struct flow_ports *ports;
	unsigned int thoff;
	struct iphdr *iph;

	if (!pskb_network_may_pull(skb, sizeof(*iph)))
		return false;

	iph = ip_hdr(skb);
	thoff = iph->ihl * 4;

	if (ip_is_fragment(iph) ||
	    unlikely(thoff != sizeof(struct iphdr)))
		return false;

	if (iph->protocol != IPPROTO_TCP &&
	    iph->protocol != IPPROTO_UDP)
		return false;

	if (iph->ttl <= 1)
		return false;

	if (!pskb_network_may_pull(skb, iph->protocol == IPPROTO_TCP ?
				   thoff + sizeof(struct tcphdr) :
				   thoff + sizeof(*ports)))
		return false;

	iph = ip_hdr(skb);
	if (iph->protocol == IPPROTO_TCP)
		*tcph = (void *)(skb_network_header(skb) + thoff);

	ports = (struct flow_ports *)(skb_network_header(skb) + thoff);
	tuple->src_v4.s_addr = iph->saddr;
	tuple->dst_v4.s_addr = iph->daddr;
	tuple->src_port = ports->source;
	tuple->dst_port = ports->dest;
	tuple->l3proto = AF_INET;
	tuple->l4proto = iph->protocol;

	return true;
}

static bool
tcf_ct_flow_table_fill_tuple_ipv6(struct sk_buff *skb,
				  struct flow_offload_tuple *tuple,
				  struct tcphdr **tcph)
{
	struct flow_ports *ports;
	struct ipv6hdr *ip6h;
	unsigned int thoff;

	if (!pskb_network_may_pull(skb, sizeof(*ip6h)))
		return false;

	ip6h = ipv6_hdr(skb);

	if (ip6h->nexthdr != IPPROTO_TCP &&
	    ip6h->nexthdr != IPPROTO_UDP)
		return false;

	if (ip6h->hop_limit <= 1)
		return false;

	thoff = sizeof(*ip6h);
	if (!pskb_network_may_pull(skb, ip6h->nexthdr == IPPROTO_TCP ?
				   thoff + sizeof(struct tcphdr) :
				   thoff + sizeof(*ports)))
		return false;

	ip6h = ipv6_hdr(skb);
	if (ip6h->nexthdr == IPPROTO_TCP)
		*tcph = (void *)(skb_network_header(skb) + thoff);

	ports = (struct flow_ports *)(skb_network_header(skb) + thoff);
	tuple->src_v6 = ip6h->saddr;
	tuple->dst_v6 = ip6h->daddr;
	tuple->src_port = ports->source;
	tuple->dst_port = ports->dest;
	tuple->l3proto = AF_INET6;
	tuple->l4proto = ip6h->nexthdr;

	return true;
}

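/* Fast path: look the packet up in the zone's flow table and, on a
 * hit, attach the conntrack entry to the skb without going through
 * nf_conntrack_in(). A TCP FIN/RST tears the offloaded flow down so
 * the connection close is tracked by conntrack again.
 */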
static bool tcf_ct_flow_table_lookup(struct tcf_ct_params *p,
				     struct sk_buff *skb,
				     u8 family)
{
	struct nf_flowtable *nf_ft = &p->ct_ft->nf_ft;
	struct flow_offload_tuple_rhash *tuplehash;
	struct flow_offload_tuple tuple = {};
	enum ip_conntrack_info ctinfo;
	struct tcphdr *tcph = NULL;
	struct flow_offload *flow;
	struct nf_conn *ct;
	u8 dir;

	/* Previously seen or loopback */
	ct = nf_ct_get(skb, &ctinfo);
	if ((ct && !nf_ct_is_template(ct)) || ctinfo == IP_CT_UNTRACKED)
		return false;

	switch (family) {
	case NFPROTO_IPV4:
		if (!tcf_ct_flow_table_fill_tuple_ipv4(skb, &tuple, &tcph))
			return false;
		break;
	case NFPROTO_IPV6:
		if (!tcf_ct_flow_table_fill_tuple_ipv6(skb, &tuple, &tcph))
			return false;
		break;
	default:
		return false;
	}

	tuplehash = flow_offload_lookup(nf_ft, &tuple);
	if (!tuplehash)
		return false;

	dir = tuplehash->tuple.dir;
	flow = container_of(tuplehash, struct flow_offload, tuplehash[dir]);
	ct = flow->ct;

	if (tcph && (unlikely(tcph->fin || tcph->rst))) {
		flow_offload_teardown(flow);
		return false;
	}

	ctinfo = dir == FLOW_OFFLOAD_DIR_ORIGINAL ? IP_CT_ESTABLISHED :
						    IP_CT_ESTABLISHED_REPLY;

	flow_offload_refresh(nf_ft, flow);
	nf_conntrack_get(&ct->ct_general);
	nf_ct_set(skb, ct, ctinfo);
	nf_ct_acct_update(ct, dir, skb->len);

	return true;
}

static int tcf_ct_flow_tables_init(void)
{
	return rhashtable_init(&zones_ht, &zones_params);
}

static void tcf_ct_flow_tables_uninit(void)
{
	rhashtable_destroy(&zones_ht);
}

static struct tc_action_ops act_ct_ops;
static unsigned int ct_net_id;

struct tc_ct_action_net {
	struct tc_action_net tn; /* Must be first */
	bool labels;
};

/* Determine whether skb->_nfct is equal to the result of conntrack lookup. */
static bool tcf_ct_skb_nfct_cached(struct net *net, struct sk_buff *skb,
				   u16 zone_id, bool force)
{
	enum ip_conntrack_info ctinfo;
	struct nf_conn *ct;

	ct = nf_ct_get(skb, &ctinfo);
	if (!ct)
		return false;
	if (!net_eq(net, read_pnet(&ct->ct_net)))
		return false;
	if (nf_ct_zone(ct)->id != zone_id)
		return false;

	/* Force conntrack entry direction. */
	if (force && CTINFO2DIR(ctinfo) != IP_CT_DIR_ORIGINAL) {
		if (nf_ct_is_confirmed(ct))
			nf_ct_kill(ct);

		nf_conntrack_put(&ct->ct_general);
		nf_ct_set(skb, NULL, IP_CT_UNTRACKED);

		return false;
	}

	return true;
}

/* Trim the skb to the length specified by the IP/IPv6 header,
 * removing any trailing lower-layer padding. This prepares the skb
 * for higher-layer processing that assumes skb->len excludes padding
 * (such as nf_ip_checksum). The caller needs to pull the skb to the
 * network header, and ensure ip_hdr/ipv6_hdr points to valid data.
 */
static int tcf_ct_skb_network_trim(struct sk_buff *skb, int family)
{
	unsigned int len;
	int err;

	switch (family) {
	case NFPROTO_IPV4:
		len = ntohs(ip_hdr(skb)->tot_len);
		break;
	case NFPROTO_IPV6:
		len = sizeof(struct ipv6hdr)
			+ ntohs(ipv6_hdr(skb)->payload_len);
		break;
	default:
		len = skb->len;
	}

	err = pskb_trim_rcsum(skb, len);

	return err;
}

static u8 tcf_ct_skb_nf_family(struct sk_buff *skb)
{
	u8 family = NFPROTO_UNSPEC;

	switch (skb_protocol(skb, true)) {
	case htons(ETH_P_IP):
		family = NFPROTO_IPV4;
		break;
	case htons(ETH_P_IPV6):
		family = NFPROTO_IPV6;
		break;
	default:
		break;
	}

	return family;
}

static int tcf_ct_ipv4_is_fragment(struct sk_buff *skb, bool *frag)
{
	unsigned int len;

	len = skb_network_offset(skb) + sizeof(struct iphdr);
	if (unlikely(skb->len < len))
		return -EINVAL;
	if (unlikely(!pskb_may_pull(skb, len)))
		return -ENOMEM;

	*frag = ip_is_fragment(ip_hdr(skb));
	return 0;
}

static int tcf_ct_ipv6_is_fragment(struct sk_buff *skb, bool *frag)
{
	unsigned int flags = 0, len, payload_ofs = 0;
	unsigned short frag_off;
	int nexthdr;

	len = skb_network_offset(skb) + sizeof(struct ipv6hdr);
	if (unlikely(skb->len < len))
		return -EINVAL;
	if (unlikely(!pskb_may_pull(skb, len)))
		return -ENOMEM;

	nexthdr = ipv6_find_hdr(skb, &payload_ofs, -1, &frag_off, &flags);
	if (unlikely(nexthdr < 0))
		return -EPROTO;

	*frag = flags & IP6_FH_F_FRAG;
	return 0;
}

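/* Defragment the packet if needed, using a per-zone defrag user so
 * fragments from different zones are not reassembled together. The
 * qdisc cb is saved and restored around defrag since IPCB/IP6CB
 * share the skb cb area with it.
 */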
static int tcf_ct_handle_fragments(struct net *net, struct sk_buff *skb,
				   u8 family, u16 zone, bool *defrag)
{
	enum ip_conntrack_info ctinfo;
	struct qdisc_skb_cb cb;
	struct nf_conn *ct;
	int err = 0;
	bool frag;

	/* Previously seen (loopback)? Ignore. */
	ct = nf_ct_get(skb, &ctinfo);
	if ((ct && !nf_ct_is_template(ct)) || ctinfo == IP_CT_UNTRACKED)
		return 0;

	if (family == NFPROTO_IPV4)
		err = tcf_ct_ipv4_is_fragment(skb, &frag);
	else
		err = tcf_ct_ipv6_is_fragment(skb, &frag);
	if (err || !frag)
		return err;

	skb_get(skb);
	cb = *qdisc_skb_cb(skb);

	if (family == NFPROTO_IPV4) {
		enum ip_defrag_users user = IP_DEFRAG_CONNTRACK_IN + zone;

		memset(IPCB(skb), 0, sizeof(struct inet_skb_parm));
		local_bh_disable();
		err = ip_defrag(net, skb, user);
		local_bh_enable();
		if (err && err != -EINPROGRESS)
			return err;

		if (!err) {
			*defrag = true;
			cb.mru = IPCB(skb)->frag_max_size;
		}
	} else { /* NFPROTO_IPV6 */
#if IS_ENABLED(CONFIG_NF_DEFRAG_IPV6)
		enum ip6_defrag_users user = IP6_DEFRAG_CONNTRACK_IN + zone;

		memset(IP6CB(skb), 0, sizeof(struct inet6_skb_parm));
		err = nf_ct_frag6_gather(net, skb, user);
		if (err && err != -EINPROGRESS)
			goto out_free;

		if (!err) {
			*defrag = true;
			cb.mru = IP6CB(skb)->frag_max_size;
		}
#else
		err = -EOPNOTSUPP;
		goto out_free;
#endif
	}

	*qdisc_skb_cb(skb) = cb;
	skb_clear_hash(skb);
	skb->ignore_df = 1;
	return err;

out_free:
	kfree_skb(skb);
	return err;
}

static void tcf_ct_params_free(struct rcu_head *head)
{
	struct tcf_ct_params *params = container_of(head,
						    struct tcf_ct_params, rcu);

	tcf_ct_flow_table_put(params);

	if (params->tmpl)
		nf_conntrack_put(&params->tmpl->ct_general);
	kfree(params);
}

#if IS_ENABLED(CONFIG_NF_NAT)
/* Modelled after nf_nat_ipv[46]_fn().
 * range is only used for new, uninitialized NAT state.
 * Returns either NF_ACCEPT or NF_DROP.
 */
static int ct_nat_execute(struct sk_buff *skb, struct nf_conn *ct,
			  enum ip_conntrack_info ctinfo,
			  const struct nf_nat_range2 *range,
			  enum nf_nat_manip_type maniptype)
{
	__be16 proto = skb_protocol(skb, true);
	int hooknum, err = NF_ACCEPT;

	/* See HOOK2MANIP(). */
	if (maniptype == NF_NAT_MANIP_SRC)
		hooknum = NF_INET_LOCAL_IN; /* Source NAT */
	else
		hooknum = NF_INET_LOCAL_OUT; /* Destination NAT */

	switch (ctinfo) {
	case IP_CT_RELATED:
	case IP_CT_RELATED_REPLY:
		if (proto == htons(ETH_P_IP) &&
		    ip_hdr(skb)->protocol == IPPROTO_ICMP) {
			if (!nf_nat_icmp_reply_translation(skb, ct, ctinfo,
							   hooknum))
				err = NF_DROP;
			goto out;
		} else if (IS_ENABLED(CONFIG_IPV6) && proto == htons(ETH_P_IPV6)) {
			__be16 frag_off;
			u8 nexthdr = ipv6_hdr(skb)->nexthdr;
			int hdrlen = ipv6_skip_exthdr(skb,
						      sizeof(struct ipv6hdr),
						      &nexthdr, &frag_off);

			if (hdrlen >= 0 && nexthdr == IPPROTO_ICMPV6) {
				if (!nf_nat_icmpv6_reply_translation(skb, ct,
								     ctinfo,
								     hooknum,
								     hdrlen))
					err = NF_DROP;
				goto out;
			}
		}
		/* Non-ICMP, fall thru to initialize if needed. */
		fallthrough;
	case IP_CT_NEW:
		/* Seen it before? This can happen for loopback, retrans,
		 * or local packets.
		 */
		if (!nf_nat_initialized(ct, maniptype)) {
			/* Initialize according to the NAT action. */
			err = (range && range->flags & NF_NAT_RANGE_MAP_IPS)
				/* Action is set up to establish a new
				 * mapping.
				 */
				? nf_nat_setup_info(ct, range, maniptype)
				: nf_nat_alloc_null_binding(ct, hooknum);
			if (err != NF_ACCEPT)
				goto out;
		}
		break;

	case IP_CT_ESTABLISHED:
	case IP_CT_ESTABLISHED_REPLY:
		break;

	default:
		err = NF_DROP;
		goto out;
	}

	err = nf_nat_packet(ct, ctinfo, hooknum, skb);
out:
	return err;
}
#endif /* CONFIG_NF_NAT */

static void tcf_ct_act_set_mark(struct nf_conn *ct, u32 mark, u32 mask)
{
#if IS_ENABLED(CONFIG_NF_CONNTRACK_MARK)
	u32 new_mark;

	if (!mask)
		return;

	new_mark = mark | (ct->mark & ~(mask));
	if (ct->mark != new_mark) {
		ct->mark = new_mark;
		if (nf_ct_is_confirmed(ct))
			nf_conntrack_event_cache(IPCT_MARK, ct);
	}
#endif
}

static void tcf_ct_act_set_labels(struct nf_conn *ct,
				  u32 *labels,
				  u32 *labels_m)
{
#if IS_ENABLED(CONFIG_NF_CONNTRACK_LABELS)
	size_t labels_sz = sizeof_field(struct tcf_ct_params, labels);

	if (!memchr_inv(labels_m, 0, labels_sz))
		return;

	nf_connlabels_replace(ct, labels, labels_m, 4);
#endif
}

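/* Apply NAT to the packet according to the ct action flags and the
 * connection's existing NAT state. Returns an NF_* verdict.
 */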
static int tcf_ct_act_nat(struct sk_buff *skb,
			  struct nf_conn *ct,
			  enum ip_conntrack_info ctinfo,
			  int ct_action,
			  struct nf_nat_range2 *range,
			  bool commit)
{
#if IS_ENABLED(CONFIG_NF_NAT)
	int err;
	enum nf_nat_manip_type maniptype;

	if (!(ct_action & TCA_CT_ACT_NAT))
		return NF_ACCEPT;

	/* Add NAT extension if not confirmed yet. */
	if (!nf_ct_is_confirmed(ct) && !nf_ct_nat_ext_add(ct))
		return NF_DROP;   /* Can't NAT. */

	if (ctinfo != IP_CT_NEW && (ct->status & IPS_NAT_MASK) &&
	    (ctinfo != IP_CT_RELATED || commit)) {
		/* NAT an established or related connection like before. */
		if (CTINFO2DIR(ctinfo) == IP_CT_DIR_REPLY)
			/* This is the REPLY direction for a connection
			 * for which NAT was applied in the forward
			 * direction.  Do the reverse NAT.
			 */
			maniptype = ct->status & IPS_SRC_NAT
				? NF_NAT_MANIP_DST : NF_NAT_MANIP_SRC;
		else
			maniptype = ct->status & IPS_SRC_NAT
				? NF_NAT_MANIP_SRC : NF_NAT_MANIP_DST;
	} else if (ct_action & TCA_CT_ACT_NAT_SRC) {
		maniptype = NF_NAT_MANIP_SRC;
	} else if (ct_action & TCA_CT_ACT_NAT_DST) {
		maniptype = NF_NAT_MANIP_DST;
	} else {
		return NF_ACCEPT;
	}

	err = ct_nat_execute(skb, ct, ctinfo, range, maniptype);
	if (err == NF_ACCEPT &&
	    ct->status & IPS_SRC_NAT && ct->status & IPS_DST_NAT) {
		if (maniptype == NF_NAT_MANIP_SRC)
			maniptype = NF_NAT_MANIP_DST;
		else
			maniptype = NF_NAT_MANIP_SRC;

		err = ct_nat_execute(skb, ct, ctinfo, range, maniptype);
	}
	return err;
#else
	return NF_ACCEPT;
#endif
}

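/* Main action handler: either clear conntrack state from the skb, or
 * run the packet through (possibly cached or flow-table-offloaded)
 * conntrack lookup and NAT, and, with the commit flag, confirm the
 * connection.
 */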
static int tcf_ct_act(struct sk_buff *skb, const struct tc_action *a,
		      struct tcf_result *res)
{
	struct net *net = dev_net(skb->dev);
	bool cached, commit, clear, force;
	enum ip_conntrack_info ctinfo;
	struct tcf_ct *c = to_ct(a);
	struct nf_conn *tmpl = NULL;
	struct nf_hook_state state;
	int nh_ofs, err, retval;
	struct tcf_ct_params *p;
	bool skip_add = false;
	bool defrag = false;
	struct nf_conn *ct;
	u8 family;

	p = rcu_dereference_bh(c->params);

	retval = READ_ONCE(c->tcf_action);
	commit = p->ct_action & TCA_CT_ACT_COMMIT;
	clear = p->ct_action & TCA_CT_ACT_CLEAR;
	force = p->ct_action & TCA_CT_ACT_FORCE;
	tmpl = p->tmpl;

	tcf_lastuse_update(&c->tcf_tm);

	if (clear) {
		ct = nf_ct_get(skb, &ctinfo);
		if (ct) {
			nf_conntrack_put(&ct->ct_general);
			nf_ct_set(skb, NULL, IP_CT_UNTRACKED);
		}

		goto out;
	}

	family = tcf_ct_skb_nf_family(skb);
	if (family == NFPROTO_UNSPEC)
		goto drop;

	/* The conntrack module expects to be working at L3.
	 * We also try to pull the IPv4/6 header to linear area
	 */
	nh_ofs = skb_network_offset(skb);
	skb_pull_rcsum(skb, nh_ofs);
	err = tcf_ct_handle_fragments(net, skb, family, p->zone, &defrag);
	if (err == -EINPROGRESS) {
		retval = TC_ACT_STOLEN;
		goto out;
	}
	if (err)
		goto drop;

	err = tcf_ct_skb_network_trim(skb, family);
	if (err)
		goto drop;

	/* If we are recirculating packets to match on ct fields and
	 * committing with a separate ct action, then we don't need to
	 * actually run the packet through conntrack twice unless it's for a
	 * different zone.
	 */
	cached = tcf_ct_skb_nfct_cached(net, skb, p->zone, force);
	if (!cached) {
		if (!commit && tcf_ct_flow_table_lookup(p, skb, family)) {
			skip_add = true;
			goto do_nat;
		}

		/* Associate skb with specified zone. */
		if (tmpl) {
			ct = nf_ct_get(skb, &ctinfo);
			if (skb_nfct(skb))
				nf_conntrack_put(skb_nfct(skb));
			nf_conntrack_get(&tmpl->ct_general);
			nf_ct_set(skb, tmpl, IP_CT_NEW);
		}

		state.hook = NF_INET_PRE_ROUTING;
		state.net = net;
		state.pf = family;
		err = nf_conntrack_in(skb, &state);
		if (err != NF_ACCEPT)
			goto out_push;
	}

do_nat:
	ct = nf_ct_get(skb, &ctinfo);
	if (!ct)
		goto out_push;
	nf_ct_deliver_cached_events(ct);

	err = tcf_ct_act_nat(skb, ct, ctinfo, p->ct_action, &p->range, commit);
	if (err != NF_ACCEPT)
		goto drop;

	if (commit) {
		tcf_ct_act_set_mark(ct, p->mark, p->mark_mask);
		tcf_ct_act_set_labels(ct, p->labels, p->labels_mask);

		/* This will take care of sending queued events
		 * even if the connection is already confirmed.
		 */
		nf_conntrack_confirm(skb);
	} else if (!skip_add) {
		tcf_ct_flow_table_process_conn(p->ct_ft, ct, ctinfo);
	}

out_push:
	skb_push_rcsum(skb, nh_ofs);

out:
	tcf_action_update_bstats(&c->common, skb);
	if (defrag)
		qdisc_skb_cb(skb)->pkt_len = skb->len;
	return retval;

drop:
	tcf_action_inc_drop_qstats(&c->common);
	return TC_ACT_SHOT;
}

static const struct nla_policy ct_policy[TCA_CT_MAX + 1] = {
	[TCA_CT_ACTION] = { .type = NLA_U16 },
	[TCA_CT_PARMS] = { .type = NLA_EXACT_LEN, .len = sizeof(struct tc_ct) },
	[TCA_CT_ZONE] = { .type = NLA_U16 },
	[TCA_CT_MARK] = { .type = NLA_U32 },
	[TCA_CT_MARK_MASK] = { .type = NLA_U32 },
	[TCA_CT_LABELS] = { .type = NLA_BINARY,
			    .len = 128 / BITS_PER_BYTE },
	[TCA_CT_LABELS_MASK] = { .type = NLA_BINARY,
				 .len = 128 / BITS_PER_BYTE },
	[TCA_CT_NAT_IPV4_MIN] = { .type = NLA_U32 },
	[TCA_CT_NAT_IPV4_MAX] = { .type = NLA_U32 },
	[TCA_CT_NAT_IPV6_MIN] = { .type = NLA_EXACT_LEN,
				  .len = sizeof(struct in6_addr) },
	[TCA_CT_NAT_IPV6_MAX] = { .type = NLA_EXACT_LEN,
				  .len = sizeof(struct in6_addr) },
	[TCA_CT_NAT_PORT_MIN] = { .type = NLA_U16 },
	[TCA_CT_NAT_PORT_MAX] = { .type = NLA_U16 },
};

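/* Parse the NAT netlink attributes into an nf_nat_range2. A missing
 * max address/port defaults to the corresponding min value.
 */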
static int tcf_ct_fill_params_nat(struct tcf_ct_params *p,
				  struct tc_ct *parm,
				  struct nlattr **tb,
				  struct netlink_ext_ack *extack)
{
	struct nf_nat_range2 *range;

	if (!(p->ct_action & TCA_CT_ACT_NAT))
		return 0;

	if (!IS_ENABLED(CONFIG_NF_NAT)) {
		NL_SET_ERR_MSG_MOD(extack, "Netfilter nat isn't enabled in kernel");
		return -EOPNOTSUPP;
	}

	if (!(p->ct_action & (TCA_CT_ACT_NAT_SRC | TCA_CT_ACT_NAT_DST)))
		return 0;

	if ((p->ct_action & TCA_CT_ACT_NAT_SRC) &&
	    (p->ct_action & TCA_CT_ACT_NAT_DST)) {
		NL_SET_ERR_MSG_MOD(extack, "dnat and snat can't be enabled at the same time");
		return -EOPNOTSUPP;
	}

	range = &p->range;
	if (tb[TCA_CT_NAT_IPV4_MIN]) {
		struct nlattr *max_attr = tb[TCA_CT_NAT_IPV4_MAX];

		p->ipv4_range = true;
		range->flags |= NF_NAT_RANGE_MAP_IPS;
		range->min_addr.ip =
			nla_get_in_addr(tb[TCA_CT_NAT_IPV4_MIN]);

		range->max_addr.ip = max_attr ?
				     nla_get_in_addr(max_attr) :
				     range->min_addr.ip;
	} else if (tb[TCA_CT_NAT_IPV6_MIN]) {
		struct nlattr *max_attr = tb[TCA_CT_NAT_IPV6_MAX];

		p->ipv4_range = false;
		range->flags |= NF_NAT_RANGE_MAP_IPS;
		range->min_addr.in6 =
			nla_get_in6_addr(tb[TCA_CT_NAT_IPV6_MIN]);

		range->max_addr.in6 = max_attr ?
				      nla_get_in6_addr(max_attr) :
				      range->min_addr.in6;
	}

	if (tb[TCA_CT_NAT_PORT_MIN]) {
		range->flags |= NF_NAT_RANGE_PROTO_SPECIFIED;
		range->min_proto.all = nla_get_be16(tb[TCA_CT_NAT_PORT_MIN]);

		range->max_proto.all = tb[TCA_CT_NAT_PORT_MAX] ?
				       nla_get_be16(tb[TCA_CT_NAT_PORT_MAX]) :
				       range->min_proto.all;
	}

	return 0;
}

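/* Copy a value (and optional mask) from netlink attributes; an absent
 * mask attribute means an exact match (full mask).
 */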
static void tcf_ct_set_key_val(struct nlattr **tb,
			       void *val, int val_type,
			       void *mask, int mask_type,
			       int len)
{
	if (!tb[val_type])
		return;
	nla_memcpy(val, tb[val_type], len);

	if (!mask)
		return;

	if (mask_type == TCA_CT_UNSPEC || !tb[mask_type])
		memset(mask, 0xff, len);
	else
		nla_memcpy(mask, tb[mask_type], len);
}

static int tcf_ct_fill_params(struct net *net,
			      struct tcf_ct_params *p,
			      struct tc_ct *parm,
			      struct nlattr **tb,
			      struct netlink_ext_ack *extack)
{
	struct tc_ct_action_net *tn = net_generic(net, ct_net_id);
	struct nf_conntrack_zone zone;
	struct nf_conn *tmpl;
	int err;

	p->zone = NF_CT_DEFAULT_ZONE_ID;

	tcf_ct_set_key_val(tb,
			   &p->ct_action, TCA_CT_ACTION,
			   NULL, TCA_CT_UNSPEC,
			   sizeof(p->ct_action));

	if (p->ct_action & TCA_CT_ACT_CLEAR)
		return 0;

	err = tcf_ct_fill_params_nat(p, parm, tb, extack);
	if (err)
		return err;

	if (tb[TCA_CT_MARK]) {
		if (!IS_ENABLED(CONFIG_NF_CONNTRACK_MARK)) {
			NL_SET_ERR_MSG_MOD(extack, "Conntrack mark isn't enabled.");
			return -EOPNOTSUPP;
		}
		tcf_ct_set_key_val(tb,
				   &p->mark, TCA_CT_MARK,
				   &p->mark_mask, TCA_CT_MARK_MASK,
				   sizeof(p->mark));
	}

	if (tb[TCA_CT_LABELS]) {
		if (!IS_ENABLED(CONFIG_NF_CONNTRACK_LABELS)) {
			NL_SET_ERR_MSG_MOD(extack, "Conntrack labels isn't enabled.");
			return -EOPNOTSUPP;
		}

		if (!tn->labels) {
			NL_SET_ERR_MSG_MOD(extack, "Failed to set connlabel length");
			return -EOPNOTSUPP;
		}
		tcf_ct_set_key_val(tb,
				   p->labels, TCA_CT_LABELS,
				   p->labels_mask, TCA_CT_LABELS_MASK,
				   sizeof(p->labels));
	}

	if (tb[TCA_CT_ZONE]) {
		if (!IS_ENABLED(CONFIG_NF_CONNTRACK_ZONES)) {
			NL_SET_ERR_MSG_MOD(extack, "Conntrack zones isn't enabled.");
			return -EOPNOTSUPP;
		}

		tcf_ct_set_key_val(tb,
				   &p->zone, TCA_CT_ZONE,
				   NULL, TCA_CT_UNSPEC,
				   sizeof(p->zone));
	}

	if (p->zone == NF_CT_DEFAULT_ZONE_ID)
		return 0;

	nf_ct_zone_init(&zone, p->zone, NF_CT_DEFAULT_ZONE_DIR, 0);
	tmpl = nf_ct_tmpl_alloc(net, &zone, GFP_KERNEL);
	if (!tmpl) {
		NL_SET_ERR_MSG_MOD(extack, "Failed to allocate conntrack template");
		return -ENOMEM;
	}
	__set_bit(IPS_CONFIRMED_BIT, &tmpl->status);
	nf_conntrack_get(&tmpl->ct_general);
	p->tmpl = tmpl;

	return 0;
}

static int tcf_ct_init(struct net *net, struct nlattr *nla,
		       struct nlattr *est, struct tc_action **a,
		       int replace, int bind, bool rtnl_held,
		       struct tcf_proto *tp, u32 flags,
		       struct netlink_ext_ack *extack)
{
	struct tc_action_net *tn = net_generic(net, ct_net_id);
	struct tcf_ct_params *params = NULL;
	struct nlattr *tb[TCA_CT_MAX + 1];
	struct tcf_chain *goto_ch = NULL;
	struct tc_ct *parm;
	struct tcf_ct *c;
	int err, res = 0;
	u32 index;

	if (!nla) {
		NL_SET_ERR_MSG_MOD(extack, "Ct requires attributes to be passed");
		return -EINVAL;
	}

	err = nla_parse_nested(tb, TCA_CT_MAX, nla, ct_policy, extack);
	if (err < 0)
		return err;

	if (!tb[TCA_CT_PARMS]) {
		NL_SET_ERR_MSG_MOD(extack, "Missing required ct parameters");
		return -EINVAL;
	}
	parm = nla_data(tb[TCA_CT_PARMS]);
	index = parm->index;
	err = tcf_idr_check_alloc(tn, &index, a, bind);
	if (err < 0)
		return err;

	if (!err) {
		err = tcf_idr_create_from_flags(tn, index, est, a,
						&act_ct_ops, bind, flags);
		if (err) {
			tcf_idr_cleanup(tn, index);
			return err;
		}
		res = ACT_P_CREATED;
	} else {
		if (bind)
			return 0;

		if (!replace) {
			tcf_idr_release(*a, bind);
			return -EEXIST;
		}
	}
	err = tcf_action_check_ctrlact(parm->action, tp, &goto_ch, extack);
	if (err < 0)
		goto cleanup;

	c = to_ct(*a);

	params = kzalloc(sizeof(*params), GFP_KERNEL);
	if (unlikely(!params)) {
		err = -ENOMEM;
		goto cleanup;
	}

	err = tcf_ct_fill_params(net, params, parm, tb, extack);
	if (err)
		goto cleanup;

	err = tcf_ct_flow_table_get(params);
	if (err)
		goto cleanup;

	spin_lock_bh(&c->tcf_lock);
	goto_ch = tcf_action_set_ctrlact(*a, parm->action, goto_ch);
	params = rcu_replace_pointer(c->params, params,
				     lockdep_is_held(&c->tcf_lock));
	spin_unlock_bh(&c->tcf_lock);

	if (goto_ch)
		tcf_chain_put_by_act(goto_ch);
	if (params)
		call_rcu(&params->rcu, tcf_ct_params_free);

	return res;

cleanup:
	if (goto_ch)
		tcf_chain_put_by_act(goto_ch);
	kfree(params);
	tcf_idr_release(*a, bind);
	return err;
}

static void tcf_ct_cleanup(struct tc_action *a)
{
	struct tcf_ct_params *params;
	struct tcf_ct *c = to_ct(a);

	params = rcu_dereference_protected(c->params, 1);
	if (params)
		call_rcu(&params->rcu, tcf_ct_params_free);
}

static int tcf_ct_dump_key_val(struct sk_buff *skb,
			       void *val, int val_type,
			       void *mask, int mask_type,
			       int len)
{
	int err;

	if (mask && !memchr_inv(mask, 0, len))
		return 0;

	err = nla_put(skb, val_type, len, val);
	if (err)
		return err;

	if (mask_type != TCA_CT_UNSPEC) {
		err = nla_put(skb, mask_type, len, mask);
		if (err)
			return err;
	}

	return 0;
}

static int tcf_ct_dump_nat(struct sk_buff *skb, struct tcf_ct_params *p)
{
	struct nf_nat_range2 *range = &p->range;

	if (!(p->ct_action & TCA_CT_ACT_NAT))
		return 0;

	if (!(p->ct_action & (TCA_CT_ACT_NAT_SRC | TCA_CT_ACT_NAT_DST)))
		return 0;

	if (range->flags & NF_NAT_RANGE_MAP_IPS) {
		if (p->ipv4_range) {
			if (nla_put_in_addr(skb, TCA_CT_NAT_IPV4_MIN,
					    range->min_addr.ip))
				return -1;
			if (nla_put_in_addr(skb, TCA_CT_NAT_IPV4_MAX,
					    range->max_addr.ip))
				return -1;
		} else {
			if (nla_put_in6_addr(skb, TCA_CT_NAT_IPV6_MIN,
					     &range->min_addr.in6))
				return -1;
			if (nla_put_in6_addr(skb, TCA_CT_NAT_IPV6_MAX,
					     &range->max_addr.in6))
				return -1;
		}
	}

	if (range->flags & NF_NAT_RANGE_PROTO_SPECIFIED) {
		if (nla_put_be16(skb, TCA_CT_NAT_PORT_MIN,
				 range->min_proto.all))
			return -1;
		if (nla_put_be16(skb, TCA_CT_NAT_PORT_MAX,
				 range->max_proto.all))
			return -1;
	}

	return 0;
}

static inline int tcf_ct_dump(struct sk_buff *skb, struct tc_action *a,
			      int bind, int ref)
{
	unsigned char *b = skb_tail_pointer(skb);
	struct tcf_ct *c = to_ct(a);
	struct tcf_ct_params *p;

	struct tc_ct opt = {
		.index   = c->tcf_index,
		.refcnt  = refcount_read(&c->tcf_refcnt) - ref,
		.bindcnt = atomic_read(&c->tcf_bindcnt) - bind,
	};
	struct tcf_t t;

	spin_lock_bh(&c->tcf_lock);
	p = rcu_dereference_protected(c->params,
				      lockdep_is_held(&c->tcf_lock));
	opt.action = c->tcf_action;

	if (tcf_ct_dump_key_val(skb,
				&p->ct_action, TCA_CT_ACTION,
				NULL, TCA_CT_UNSPEC,
				sizeof(p->ct_action)))
		goto nla_put_failure;

	if (p->ct_action & TCA_CT_ACT_CLEAR)
		goto skip_dump;

	if (IS_ENABLED(CONFIG_NF_CONNTRACK_MARK) &&
	    tcf_ct_dump_key_val(skb,
				&p->mark, TCA_CT_MARK,
				&p->mark_mask, TCA_CT_MARK_MASK,
				sizeof(p->mark)))
		goto nla_put_failure;

	if (IS_ENABLED(CONFIG_NF_CONNTRACK_LABELS) &&
	    tcf_ct_dump_key_val(skb,
				p->labels, TCA_CT_LABELS,
				p->labels_mask, TCA_CT_LABELS_MASK,
				sizeof(p->labels)))
		goto nla_put_failure;

	if (IS_ENABLED(CONFIG_NF_CONNTRACK_ZONES) &&
	    tcf_ct_dump_key_val(skb,
				&p->zone, TCA_CT_ZONE,
				NULL, TCA_CT_UNSPEC,
				sizeof(p->zone)))
		goto nla_put_failure;

	if (tcf_ct_dump_nat(skb, p))
		goto nla_put_failure;

skip_dump:
	if (nla_put(skb, TCA_CT_PARMS, sizeof(opt), &opt))
		goto nla_put_failure;

	tcf_tm_dump(&t, &c->tcf_tm);
	if (nla_put_64bit(skb, TCA_CT_TM, sizeof(t), &t, TCA_CT_PAD))
		goto nla_put_failure;
	spin_unlock_bh(&c->tcf_lock);

	return skb->len;
nla_put_failure:
	spin_unlock_bh(&c->tcf_lock);
	nlmsg_trim(skb, b);
	return -1;
}

static int tcf_ct_walker(struct net *net, struct sk_buff *skb,
			 struct netlink_callback *cb, int type,
			 const struct tc_action_ops *ops,
			 struct netlink_ext_ack *extack)
{
	struct tc_action_net *tn = net_generic(net, ct_net_id);

	return tcf_generic_walker(tn, skb, cb, type, ops, extack);
}

static int tcf_ct_search(struct net *net, struct tc_action **a, u32 index)
{
	struct tc_action_net *tn = net_generic(net, ct_net_id);

	return tcf_idr_search(tn, a, index);
}

static void tcf_stats_update(struct tc_action *a, u64 bytes, u64 packets,
			     u64 drops, u64 lastuse, bool hw)
{
	struct tcf_ct *c = to_ct(a);

	tcf_action_update_stats(a, bytes, packets, drops, hw);
	c->tcf_tm.lastuse = max_t(u64, c->tcf_tm.lastuse, lastuse);
}

static struct tc_action_ops act_ct_ops = {
	.kind		=	"ct",
	.id		=	TCA_ID_CT,
	.owner		=	THIS_MODULE,
	.act		=	tcf_ct_act,
	.dump		=	tcf_ct_dump,
	.init		=	tcf_ct_init,
	.cleanup	=	tcf_ct_cleanup,
	.walk		=	tcf_ct_walker,
	.lookup		=	tcf_ct_search,
	.stats_update	=	tcf_stats_update,
	.size		=	sizeof(struct tcf_ct),
};

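/* Per-netns setup: reserve connlabel space for the bits the ct action
 * can set; if that fails, setting labels is refused at configure time
 * via tn->labels.
 */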
static __net_init int ct_init_net(struct net *net)
{
	unsigned int n_bits = sizeof_field(struct tcf_ct_params, labels) * 8;
	struct tc_ct_action_net *tn = net_generic(net, ct_net_id);

	if (nf_connlabels_get(net, n_bits - 1)) {
		tn->labels = false;
		pr_err("act_ct: Failed to set connlabels length");
	} else {
		tn->labels = true;
	}

	return tc_action_net_init(net, &tn->tn, &act_ct_ops);
}

static void __net_exit ct_exit_net(struct list_head *net_list)
{
	struct net *net;

	rtnl_lock();
	list_for_each_entry(net, net_list, exit_list) {
		struct tc_ct_action_net *tn = net_generic(net, ct_net_id);

		if (tn->labels)
			nf_connlabels_put(net);
	}
	rtnl_unlock();

	tc_action_net_exit(net_list, ct_net_id);
}

static struct pernet_operations ct_net_ops = {
	.init = ct_init_net,
	.exit_batch = ct_exit_net,
	.id   = &ct_net_id,
	.size = sizeof(struct tc_ct_action_net),
};

static int __init ct_init_module(void)
{
	int err;

	act_ct_wq = alloc_ordered_workqueue("act_ct_workqueue", 0);
	if (!act_ct_wq)
		return -ENOMEM;

	err = tcf_ct_flow_tables_init();
	if (err)
		goto err_tbl_init;

	err = tcf_register_action(&act_ct_ops, &ct_net_ops);
	if (err)
		goto err_register;

	return 0;

err_register:
	tcf_ct_flow_tables_uninit();
err_tbl_init:
	destroy_workqueue(act_ct_wq);
	return err;
}

static void __exit ct_cleanup_module(void)
{
	tcf_unregister_action(&act_ct_ops, &ct_net_ops);
	tcf_ct_flow_tables_uninit();
	destroy_workqueue(act_ct_wq);
}

module_init(ct_init_module);
module_exit(ct_cleanup_module);
MODULE_AUTHOR("Paul Blakey <paulb@mellanox.com>");
MODULE_AUTHOR("Yossi Kuperman <yossiku@mellanox.com>");
MODULE_AUTHOR("Marcelo Ricardo Leitner <marcelo.leitner@gmail.com>");
MODULE_DESCRIPTION("Connection tracking action");
MODULE_LICENSE("GPL v2");
1// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
2/* -
3 * net/sched/act_ct.c Connection Tracking action
4 *
5 * Authors: Paul Blakey <paulb@mellanox.com>
6 * Yossi Kuperman <yossiku@mellanox.com>
7 * Marcelo Ricardo Leitner <marcelo.leitner@gmail.com>
8 */
9
10#include <linux/module.h>
11#include <linux/init.h>
12#include <linux/kernel.h>
13#include <linux/skbuff.h>
14#include <linux/rtnetlink.h>
15#include <linux/pkt_cls.h>
16#include <linux/ip.h>
17#include <linux/ipv6.h>
18#include <linux/rhashtable.h>
19#include <net/netlink.h>
20#include <net/pkt_sched.h>
21#include <net/pkt_cls.h>
22#include <net/act_api.h>
23#include <net/ip.h>
24#include <net/ipv6_frag.h>
25#include <uapi/linux/tc_act/tc_ct.h>
26#include <net/tc_act/tc_ct.h>
27
28#include <net/netfilter/nf_flow_table.h>
29#include <net/netfilter/nf_conntrack.h>
30#include <net/netfilter/nf_conntrack_core.h>
31#include <net/netfilter/nf_conntrack_zones.h>
32#include <net/netfilter/nf_conntrack_helper.h>
33#include <net/netfilter/nf_conntrack_acct.h>
34#include <net/netfilter/ipv6/nf_defrag_ipv6.h>
35#include <uapi/linux/netfilter/nf_nat.h>
36
37static struct workqueue_struct *act_ct_wq;
38static struct rhashtable zones_ht;
39static DEFINE_MUTEX(zones_mutex);
40
41struct tcf_ct_flow_table {
42 struct rhash_head node; /* In zones tables */
43
44 struct rcu_work rwork;
45 struct nf_flowtable nf_ft;
46 refcount_t ref;
47 u16 zone;
48
49 bool dying;
50};
51
52static const struct rhashtable_params zones_params = {
53 .head_offset = offsetof(struct tcf_ct_flow_table, node),
54 .key_offset = offsetof(struct tcf_ct_flow_table, zone),
55 .key_len = sizeof_field(struct tcf_ct_flow_table, zone),
56 .automatic_shrinking = true,
57};
58
59static struct flow_action_entry *
60tcf_ct_flow_table_flow_action_get_next(struct flow_action *flow_action)
61{
62 int i = flow_action->num_entries++;
63
64 return &flow_action->entries[i];
65}
66
67static void tcf_ct_add_mangle_action(struct flow_action *action,
68 enum flow_action_mangle_base htype,
69 u32 offset,
70 u32 mask,
71 u32 val)
72{
73 struct flow_action_entry *entry;
74
75 entry = tcf_ct_flow_table_flow_action_get_next(action);
76 entry->id = FLOW_ACTION_MANGLE;
77 entry->mangle.htype = htype;
78 entry->mangle.mask = ~mask;
79 entry->mangle.offset = offset;
80 entry->mangle.val = val;
81}
82
83/* The following nat helper functions check if the inverted reverse tuple
84 * (target) is different then the current dir tuple - meaning nat for ports
85 * and/or ip is needed, and add the relevant mangle actions.
86 */
87static void
88tcf_ct_flow_table_add_action_nat_ipv4(const struct nf_conntrack_tuple *tuple,
89 struct nf_conntrack_tuple target,
90 struct flow_action *action)
91{
92 if (memcmp(&target.src.u3, &tuple->src.u3, sizeof(target.src.u3)))
93 tcf_ct_add_mangle_action(action, FLOW_ACT_MANGLE_HDR_TYPE_IP4,
94 offsetof(struct iphdr, saddr),
95 0xFFFFFFFF,
96 be32_to_cpu(target.src.u3.ip));
97 if (memcmp(&target.dst.u3, &tuple->dst.u3, sizeof(target.dst.u3)))
98 tcf_ct_add_mangle_action(action, FLOW_ACT_MANGLE_HDR_TYPE_IP4,
99 offsetof(struct iphdr, daddr),
100 0xFFFFFFFF,
101 be32_to_cpu(target.dst.u3.ip));
102}
103
104static void
105tcf_ct_add_ipv6_addr_mangle_action(struct flow_action *action,
106 union nf_inet_addr *addr,
107 u32 offset)
108{
109 int i;
110
111 for (i = 0; i < sizeof(struct in6_addr) / sizeof(u32); i++)
112 tcf_ct_add_mangle_action(action, FLOW_ACT_MANGLE_HDR_TYPE_IP6,
113 i * sizeof(u32) + offset,
114 0xFFFFFFFF, be32_to_cpu(addr->ip6[i]));
115}
116
117static void
118tcf_ct_flow_table_add_action_nat_ipv6(const struct nf_conntrack_tuple *tuple,
119 struct nf_conntrack_tuple target,
120 struct flow_action *action)
121{
122 if (memcmp(&target.src.u3, &tuple->src.u3, sizeof(target.src.u3)))
123 tcf_ct_add_ipv6_addr_mangle_action(action, &target.src.u3,
124 offsetof(struct ipv6hdr,
125 saddr));
126 if (memcmp(&target.dst.u3, &tuple->dst.u3, sizeof(target.dst.u3)))
127 tcf_ct_add_ipv6_addr_mangle_action(action, &target.dst.u3,
128 offsetof(struct ipv6hdr,
129 daddr));
130}
131
132static void
133tcf_ct_flow_table_add_action_nat_tcp(const struct nf_conntrack_tuple *tuple,
134 struct nf_conntrack_tuple target,
135 struct flow_action *action)
136{
137 __be16 target_src = target.src.u.tcp.port;
138 __be16 target_dst = target.dst.u.tcp.port;
139
140 if (target_src != tuple->src.u.tcp.port)
141 tcf_ct_add_mangle_action(action, FLOW_ACT_MANGLE_HDR_TYPE_TCP,
142 offsetof(struct tcphdr, source),
143 0xFFFF, be16_to_cpu(target_src));
144 if (target_dst != tuple->dst.u.tcp.port)
145 tcf_ct_add_mangle_action(action, FLOW_ACT_MANGLE_HDR_TYPE_TCP,
146 offsetof(struct tcphdr, dest),
147 0xFFFF, be16_to_cpu(target_dst));
148}
149
150static void
151tcf_ct_flow_table_add_action_nat_udp(const struct nf_conntrack_tuple *tuple,
152 struct nf_conntrack_tuple target,
153 struct flow_action *action)
154{
155 __be16 target_src = target.src.u.udp.port;
156 __be16 target_dst = target.dst.u.udp.port;
157
158 if (target_src != tuple->src.u.udp.port)
159 tcf_ct_add_mangle_action(action, FLOW_ACT_MANGLE_HDR_TYPE_UDP,
160 offsetof(struct udphdr, source),
161 0xFFFF, be16_to_cpu(target_src));
162 if (target_dst != tuple->dst.u.udp.port)
163 tcf_ct_add_mangle_action(action, FLOW_ACT_MANGLE_HDR_TYPE_UDP,
164 offsetof(struct udphdr, dest),
165 0xFFFF, be16_to_cpu(target_dst));
166}
167
168static void tcf_ct_flow_table_add_action_meta(struct nf_conn *ct,
169 enum ip_conntrack_dir dir,
170 struct flow_action *action)
171{
172 struct nf_conn_labels *ct_labels;
173 struct flow_action_entry *entry;
174 enum ip_conntrack_info ctinfo;
175 u32 *act_ct_labels;
176
177 entry = tcf_ct_flow_table_flow_action_get_next(action);
178 entry->id = FLOW_ACTION_CT_METADATA;
179#if IS_ENABLED(CONFIG_NF_CONNTRACK_MARK)
180 entry->ct_metadata.mark = ct->mark;
181#endif
182 ctinfo = dir == IP_CT_DIR_ORIGINAL ? IP_CT_ESTABLISHED :
183 IP_CT_ESTABLISHED_REPLY;
184 /* aligns with the CT reference on the SKB nf_ct_set */
185 entry->ct_metadata.cookie = (unsigned long)ct | ctinfo;
186 entry->ct_metadata.orig_dir = dir == IP_CT_DIR_ORIGINAL;
187
188 act_ct_labels = entry->ct_metadata.labels;
189 ct_labels = nf_ct_labels_find(ct);
190 if (ct_labels)
191 memcpy(act_ct_labels, ct_labels->bits, NF_CT_LABELS_MAX_SIZE);
192 else
193 memset(act_ct_labels, 0, NF_CT_LABELS_MAX_SIZE);
194}
195
196static int tcf_ct_flow_table_add_action_nat(struct net *net,
197 struct nf_conn *ct,
198 enum ip_conntrack_dir dir,
199 struct flow_action *action)
200{
201 const struct nf_conntrack_tuple *tuple = &ct->tuplehash[dir].tuple;
202 struct nf_conntrack_tuple target;
203
204 if (!(ct->status & IPS_NAT_MASK))
205 return 0;
206
207 nf_ct_invert_tuple(&target, &ct->tuplehash[!dir].tuple);
208
209 switch (tuple->src.l3num) {
210 case NFPROTO_IPV4:
211 tcf_ct_flow_table_add_action_nat_ipv4(tuple, target,
212 action);
213 break;
214 case NFPROTO_IPV6:
215 tcf_ct_flow_table_add_action_nat_ipv6(tuple, target,
216 action);
217 break;
218 default:
219 return -EOPNOTSUPP;
220 }
221
222 switch (nf_ct_protonum(ct)) {
223 case IPPROTO_TCP:
224 tcf_ct_flow_table_add_action_nat_tcp(tuple, target, action);
225 break;
226 case IPPROTO_UDP:
227 tcf_ct_flow_table_add_action_nat_udp(tuple, target, action);
228 break;
229 default:
230 return -EOPNOTSUPP;
231 }
232
233 return 0;
234}
235
236static int tcf_ct_flow_table_fill_actions(struct net *net,
237 const struct flow_offload *flow,
238 enum flow_offload_tuple_dir tdir,
239 struct nf_flow_rule *flow_rule)
240{
241 struct flow_action *action = &flow_rule->rule->action;
242 int num_entries = action->num_entries;
243 struct nf_conn *ct = flow->ct;
244 enum ip_conntrack_dir dir;
245 int i, err;
246
247 switch (tdir) {
248 case FLOW_OFFLOAD_DIR_ORIGINAL:
249 dir = IP_CT_DIR_ORIGINAL;
250 break;
251 case FLOW_OFFLOAD_DIR_REPLY:
252 dir = IP_CT_DIR_REPLY;
253 break;
254 default:
255 return -EOPNOTSUPP;
256 }
257
258 err = tcf_ct_flow_table_add_action_nat(net, ct, dir, action);
259 if (err)
260 goto err_nat;
261
262 tcf_ct_flow_table_add_action_meta(ct, dir, action);
263 return 0;
264
265err_nat:
266 /* Clear filled actions */
267 for (i = num_entries; i < action->num_entries; i++)
268 memset(&action->entries[i], 0, sizeof(action->entries[i]));
269 action->num_entries = num_entries;
270
271 return err;
272}
273
274static struct nf_flowtable_type flowtable_ct = {
275 .action = tcf_ct_flow_table_fill_actions,
276 .owner = THIS_MODULE,
277};
278
279static int tcf_ct_flow_table_get(struct tcf_ct_params *params)
280{
281 struct tcf_ct_flow_table *ct_ft;
282 int err = -ENOMEM;
283
284 mutex_lock(&zones_mutex);
285 ct_ft = rhashtable_lookup_fast(&zones_ht, ¶ms->zone, zones_params);
286 if (ct_ft && refcount_inc_not_zero(&ct_ft->ref))
287 goto out_unlock;
288
289 ct_ft = kzalloc(sizeof(*ct_ft), GFP_KERNEL);
290 if (!ct_ft)
291 goto err_alloc;
292 refcount_set(&ct_ft->ref, 1);
293
294 ct_ft->zone = params->zone;
295 err = rhashtable_insert_fast(&zones_ht, &ct_ft->node, zones_params);
296 if (err)
297 goto err_insert;
298
299 ct_ft->nf_ft.type = &flowtable_ct;
300 ct_ft->nf_ft.flags |= NF_FLOWTABLE_HW_OFFLOAD |
301 NF_FLOWTABLE_COUNTER;
302 err = nf_flow_table_init(&ct_ft->nf_ft);
303 if (err)
304 goto err_init;
305
306 __module_get(THIS_MODULE);
307out_unlock:
308 params->ct_ft = ct_ft;
309 params->nf_ft = &ct_ft->nf_ft;
310 mutex_unlock(&zones_mutex);
311
312 return 0;
313
314err_init:
315 rhashtable_remove_fast(&zones_ht, &ct_ft->node, zones_params);
316err_insert:
317 kfree(ct_ft);
318err_alloc:
319 mutex_unlock(&zones_mutex);
320 return err;
321}
322
323static void tcf_ct_flow_table_cleanup_work(struct work_struct *work)
324{
325 struct flow_block_cb *block_cb, *tmp_cb;
326 struct tcf_ct_flow_table *ct_ft;
327 struct flow_block *block;
328
329 ct_ft = container_of(to_rcu_work(work), struct tcf_ct_flow_table,
330 rwork);
331 nf_flow_table_free(&ct_ft->nf_ft);
332
333 /* Remove any remaining callbacks before cleanup */
334 block = &ct_ft->nf_ft.flow_block;
335 down_write(&ct_ft->nf_ft.flow_block_lock);
336 list_for_each_entry_safe(block_cb, tmp_cb, &block->cb_list, list) {
337 list_del(&block_cb->list);
338 flow_block_cb_free(block_cb);
339 }
340 up_write(&ct_ft->nf_ft.flow_block_lock);
341 kfree(ct_ft);
342
343 module_put(THIS_MODULE);
344}
345
346static void tcf_ct_flow_table_put(struct tcf_ct_params *params)
347{
348 struct tcf_ct_flow_table *ct_ft = params->ct_ft;
349
350 if (refcount_dec_and_test(¶ms->ct_ft->ref)) {
351 rhashtable_remove_fast(&zones_ht, &ct_ft->node, zones_params);
352 INIT_RCU_WORK(&ct_ft->rwork, tcf_ct_flow_table_cleanup_work);
353 queue_rcu_work(act_ct_wq, &ct_ft->rwork);
354 }
355}
356
357static void tcf_ct_flow_table_add(struct tcf_ct_flow_table *ct_ft,
358 struct nf_conn *ct,
359 bool tcp)
360{
361 struct flow_offload *entry;
362 int err;
363
364 if (test_and_set_bit(IPS_OFFLOAD_BIT, &ct->status))
365 return;
366
367 entry = flow_offload_alloc(ct);
368 if (!entry) {
369 WARN_ON_ONCE(1);
370 goto err_alloc;
371 }
372
373 if (tcp) {
374 ct->proto.tcp.seen[0].flags |= IP_CT_TCP_FLAG_BE_LIBERAL;
375 ct->proto.tcp.seen[1].flags |= IP_CT_TCP_FLAG_BE_LIBERAL;
376 }
377
378 err = flow_offload_add(&ct_ft->nf_ft, entry);
379 if (err)
380 goto err_add;
381
382 return;
383
384err_add:
385 flow_offload_free(entry);
386err_alloc:
387 clear_bit(IPS_OFFLOAD_BIT, &ct->status);
388}
389
390static void tcf_ct_flow_table_process_conn(struct tcf_ct_flow_table *ct_ft,
391 struct nf_conn *ct,
392 enum ip_conntrack_info ctinfo)
393{
394 bool tcp = false;
395
396 if (ctinfo != IP_CT_ESTABLISHED && ctinfo != IP_CT_ESTABLISHED_REPLY)
397 return;
398
399 switch (nf_ct_protonum(ct)) {
400 case IPPROTO_TCP:
401 tcp = true;
402 if (ct->proto.tcp.state != TCP_CONNTRACK_ESTABLISHED)
403 return;
404 break;
405 case IPPROTO_UDP:
406 break;
407 default:
408 return;
409 }
410
411 if (nf_ct_ext_exist(ct, NF_CT_EXT_HELPER) ||
412 ct->status & IPS_SEQ_ADJUST)
413 return;
414
415 tcf_ct_flow_table_add(ct_ft, ct, tcp);
416}
417
418static bool
419tcf_ct_flow_table_fill_tuple_ipv4(struct sk_buff *skb,
420 struct flow_offload_tuple *tuple,
421 struct tcphdr **tcph)
422{
423 struct flow_ports *ports;
424 unsigned int thoff;
425 struct iphdr *iph;
426
427 if (!pskb_network_may_pull(skb, sizeof(*iph)))
428 return false;
429
430 iph = ip_hdr(skb);
431 thoff = iph->ihl * 4;
432
433 if (ip_is_fragment(iph) ||
434 unlikely(thoff != sizeof(struct iphdr)))
435 return false;
436
437 if (iph->protocol != IPPROTO_TCP &&
438 iph->protocol != IPPROTO_UDP)
439 return false;
440
441 if (iph->ttl <= 1)
442 return false;
443
444 if (!pskb_network_may_pull(skb, iph->protocol == IPPROTO_TCP ?
445 thoff + sizeof(struct tcphdr) :
446 thoff + sizeof(*ports)))
447 return false;
448
449 iph = ip_hdr(skb);
450 if (iph->protocol == IPPROTO_TCP)
451 *tcph = (void *)(skb_network_header(skb) + thoff);
452
453 ports = (struct flow_ports *)(skb_network_header(skb) + thoff);
454 tuple->src_v4.s_addr = iph->saddr;
455 tuple->dst_v4.s_addr = iph->daddr;
456 tuple->src_port = ports->source;
457 tuple->dst_port = ports->dest;
458 tuple->l3proto = AF_INET;
459 tuple->l4proto = iph->protocol;
460
461 return true;
462}
463
464static bool
465tcf_ct_flow_table_fill_tuple_ipv6(struct sk_buff *skb,
466 struct flow_offload_tuple *tuple,
467 struct tcphdr **tcph)
468{
469 struct flow_ports *ports;
470 struct ipv6hdr *ip6h;
471 unsigned int thoff;
472
473 if (!pskb_network_may_pull(skb, sizeof(*ip6h)))
474 return false;
475
476 ip6h = ipv6_hdr(skb);
477
478 if (ip6h->nexthdr != IPPROTO_TCP &&
479 ip6h->nexthdr != IPPROTO_UDP)
480 return false;
481
482 if (ip6h->hop_limit <= 1)
483 return false;
484
485 thoff = sizeof(*ip6h);
486 if (!pskb_network_may_pull(skb, ip6h->nexthdr == IPPROTO_TCP ?
487 thoff + sizeof(struct tcphdr) :
488 thoff + sizeof(*ports)))
489 return false;
490
491 ip6h = ipv6_hdr(skb);
492 if (ip6h->nexthdr == IPPROTO_TCP)
493 *tcph = (void *)(skb_network_header(skb) + thoff);
494
495 ports = (struct flow_ports *)(skb_network_header(skb) + thoff);
496 tuple->src_v6 = ip6h->saddr;
497 tuple->dst_v6 = ip6h->daddr;
498 tuple->src_port = ports->source;
499 tuple->dst_port = ports->dest;
500 tuple->l3proto = AF_INET6;
501 tuple->l4proto = ip6h->nexthdr;
502
503 return true;
504}
505
static bool tcf_ct_flow_table_lookup(struct tcf_ct_params *p,
				     struct sk_buff *skb,
				     u8 family)
{
	struct nf_flowtable *nf_ft = &p->ct_ft->nf_ft;
	struct flow_offload_tuple_rhash *tuplehash;
	struct flow_offload_tuple tuple = {};
	enum ip_conntrack_info ctinfo;
	struct tcphdr *tcph = NULL;
	struct flow_offload *flow;
	struct nf_conn *ct;
	u8 dir;

	/* Previously seen or loopback */
	ct = nf_ct_get(skb, &ctinfo);
	if ((ct && !nf_ct_is_template(ct)) || ctinfo == IP_CT_UNTRACKED)
		return false;

	switch (family) {
	case NFPROTO_IPV4:
		if (!tcf_ct_flow_table_fill_tuple_ipv4(skb, &tuple, &tcph))
			return false;
		break;
	case NFPROTO_IPV6:
		if (!tcf_ct_flow_table_fill_tuple_ipv6(skb, &tuple, &tcph))
			return false;
		break;
	default:
		return false;
	}

	tuplehash = flow_offload_lookup(nf_ft, &tuple);
	if (!tuplehash)
		return false;

	dir = tuplehash->tuple.dir;
	flow = container_of(tuplehash, struct flow_offload, tuplehash[dir]);
	ct = flow->ct;

	if (tcph && (unlikely(tcph->fin || tcph->rst))) {
		flow_offload_teardown(flow);
		return false;
	}

	ctinfo = dir == FLOW_OFFLOAD_DIR_ORIGINAL ? IP_CT_ESTABLISHED :
						    IP_CT_ESTABLISHED_REPLY;

	flow_offload_refresh(nf_ft, flow);
	nf_conntrack_get(&ct->ct_general);
	nf_ct_set(skb, ct, ctinfo);
	if (nf_ft->flags & NF_FLOWTABLE_COUNTER)
		nf_ct_acct_update(ct, dir, skb->len);

	return true;
}

static int tcf_ct_flow_tables_init(void)
{
	return rhashtable_init(&zones_ht, &zones_params);
}

static void tcf_ct_flow_tables_uninit(void)
{
	rhashtable_destroy(&zones_ht);
}

static struct tc_action_ops act_ct_ops;
static unsigned int ct_net_id;

struct tc_ct_action_net {
	struct tc_action_net tn; /* Must be first */
	bool labels;
};

/* Determine whether skb->_nfct is equal to the result of conntrack lookup. */
static bool tcf_ct_skb_nfct_cached(struct net *net, struct sk_buff *skb,
				   u16 zone_id, bool force)
{
	enum ip_conntrack_info ctinfo;
	struct nf_conn *ct;

	ct = nf_ct_get(skb, &ctinfo);
	if (!ct)
		return false;
	if (!net_eq(net, read_pnet(&ct->ct_net)))
		return false;
	if (nf_ct_zone(ct)->id != zone_id)
		return false;

	/* Force conntrack entry direction. */
	if (force && CTINFO2DIR(ctinfo) != IP_CT_DIR_ORIGINAL) {
		if (nf_ct_is_confirmed(ct))
			nf_ct_kill(ct);

		nf_conntrack_put(&ct->ct_general);
		nf_ct_set(skb, NULL, IP_CT_UNTRACKED);

		return false;
	}

	return true;
}

/* Trim the skb to the length specified by the IP/IPv6 header,
 * removing any trailing lower-layer padding. This prepares the skb
 * for higher-layer processing that assumes skb->len excludes padding
 * (such as nf_ip_checksum). The caller needs to pull the skb to the
 * network header, and ensure ip_hdr/ipv6_hdr points to valid data.
 */
static int tcf_ct_skb_network_trim(struct sk_buff *skb, int family)
{
	unsigned int len;
	int err;

	switch (family) {
	case NFPROTO_IPV4:
		len = ntohs(ip_hdr(skb)->tot_len);
		break;
	case NFPROTO_IPV6:
		len = sizeof(struct ipv6hdr)
			+ ntohs(ipv6_hdr(skb)->payload_len);
		break;
	default:
		len = skb->len;
	}

	err = pskb_trim_rcsum(skb, len);

	return err;
}

static u8 tcf_ct_skb_nf_family(struct sk_buff *skb)
{
	u8 family = NFPROTO_UNSPEC;

	switch (skb_protocol(skb, true)) {
	case htons(ETH_P_IP):
		family = NFPROTO_IPV4;
		break;
	case htons(ETH_P_IPV6):
		family = NFPROTO_IPV6;
		break;
	default:
		break;
	}

	return family;
}

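/* Report whether the linear IPv4 header marks the packet as a fragment. */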
static int tcf_ct_ipv4_is_fragment(struct sk_buff *skb, bool *frag)
{
	unsigned int len;

	len = skb_network_offset(skb) + sizeof(struct iphdr);
	if (unlikely(skb->len < len))
		return -EINVAL;
	if (unlikely(!pskb_may_pull(skb, len)))
		return -ENOMEM;

	*frag = ip_is_fragment(ip_hdr(skb));
	return 0;
}

static int tcf_ct_ipv6_is_fragment(struct sk_buff *skb, bool *frag)
{
	unsigned int flags = 0, len, payload_ofs = 0;
	unsigned short frag_off;
	int nexthdr;

	len = skb_network_offset(skb) + sizeof(struct ipv6hdr);
	if (unlikely(skb->len < len))
		return -EINVAL;
	if (unlikely(!pskb_may_pull(skb, len)))
		return -ENOMEM;

	nexthdr = ipv6_find_hdr(skb, &payload_ofs, -1, &frag_off, &flags);
	if (unlikely(nexthdr < 0))
		return -EPROTO;

	*frag = flags & IP6_FH_F_FRAG;
	return 0;
}

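/* Reassemble IP fragments in the conntrack zone's defrag queue before the
 * packet is fed to conntrack. Returns -EINPROGRESS when the fragment was
 * queued (the caller treats the skb as stolen), 0 once a full packet is
 * available, or a negative error.
 */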
static int tcf_ct_handle_fragments(struct net *net, struct sk_buff *skb,
				   u8 family, u16 zone, bool *defrag)
{
	enum ip_conntrack_info ctinfo;
	struct qdisc_skb_cb cb;
	struct nf_conn *ct;
	int err = 0;
	bool frag;

	/* Previously seen (loopback)? Ignore. */
	ct = nf_ct_get(skb, &ctinfo);
	if ((ct && !nf_ct_is_template(ct)) || ctinfo == IP_CT_UNTRACKED)
		return 0;

	if (family == NFPROTO_IPV4)
		err = tcf_ct_ipv4_is_fragment(skb, &frag);
	else
		err = tcf_ct_ipv6_is_fragment(skb, &frag);
	if (err || !frag)
		return err;

	skb_get(skb);
	cb = *qdisc_skb_cb(skb);

	if (family == NFPROTO_IPV4) {
		enum ip_defrag_users user = IP_DEFRAG_CONNTRACK_IN + zone;

		memset(IPCB(skb), 0, sizeof(struct inet_skb_parm));
		local_bh_disable();
		err = ip_defrag(net, skb, user);
		local_bh_enable();
		if (err && err != -EINPROGRESS)
			return err;

		if (!err) {
			*defrag = true;
			cb.mru = IPCB(skb)->frag_max_size;
		}
	} else { /* NFPROTO_IPV6 */
#if IS_ENABLED(CONFIG_NF_DEFRAG_IPV6)
		enum ip6_defrag_users user = IP6_DEFRAG_CONNTRACK_IN + zone;

		memset(IP6CB(skb), 0, sizeof(struct inet6_skb_parm));
		err = nf_ct_frag6_gather(net, skb, user);
		if (err && err != -EINPROGRESS)
			goto out_free;

		if (!err) {
			*defrag = true;
			cb.mru = IP6CB(skb)->frag_max_size;
		}
#else
		err = -EOPNOTSUPP;
		goto out_free;
#endif
	}

	if (err != -EINPROGRESS)
		*qdisc_skb_cb(skb) = cb;
	skb_clear_hash(skb);
	skb->ignore_df = 1;
	return err;

out_free:
	kfree_skb(skb);
	return err;
}

static void tcf_ct_params_free(struct rcu_head *head)
{
	struct tcf_ct_params *params = container_of(head,
						    struct tcf_ct_params, rcu);

	tcf_ct_flow_table_put(params);

	if (params->tmpl)
		nf_conntrack_put(&params->tmpl->ct_general);
	kfree(params);
}

#if IS_ENABLED(CONFIG_NF_NAT)
/* Modelled after nf_nat_ipv[46]_fn().
 * range is only used for new, uninitialized NAT state.
 * Returns either NF_ACCEPT or NF_DROP.
 */
static int ct_nat_execute(struct sk_buff *skb, struct nf_conn *ct,
			  enum ip_conntrack_info ctinfo,
			  const struct nf_nat_range2 *range,
			  enum nf_nat_manip_type maniptype)
{
	__be16 proto = skb_protocol(skb, true);
	int hooknum, err = NF_ACCEPT;

	/* See HOOK2MANIP(). */
	if (maniptype == NF_NAT_MANIP_SRC)
		hooknum = NF_INET_LOCAL_IN; /* Source NAT */
	else
		hooknum = NF_INET_LOCAL_OUT; /* Destination NAT */

	switch (ctinfo) {
	case IP_CT_RELATED:
	case IP_CT_RELATED_REPLY:
		if (proto == htons(ETH_P_IP) &&
		    ip_hdr(skb)->protocol == IPPROTO_ICMP) {
			if (!nf_nat_icmp_reply_translation(skb, ct, ctinfo,
							   hooknum))
				err = NF_DROP;
			goto out;
		} else if (IS_ENABLED(CONFIG_IPV6) && proto == htons(ETH_P_IPV6)) {
			__be16 frag_off;
			u8 nexthdr = ipv6_hdr(skb)->nexthdr;
			int hdrlen = ipv6_skip_exthdr(skb,
						      sizeof(struct ipv6hdr),
						      &nexthdr, &frag_off);

			if (hdrlen >= 0 && nexthdr == IPPROTO_ICMPV6) {
				if (!nf_nat_icmpv6_reply_translation(skb, ct,
								     ctinfo,
								     hooknum,
								     hdrlen))
					err = NF_DROP;
				goto out;
			}
		}
		/* Non-ICMP, fall thru to initialize if needed. */
		fallthrough;
	case IP_CT_NEW:
		/* Seen it before? This can happen for loopback, retrans,
		 * or local packets.
		 */
		if (!nf_nat_initialized(ct, maniptype)) {
			/* Initialize according to the NAT action. */
			err = (range && range->flags & NF_NAT_RANGE_MAP_IPS)
				/* Action is set up to establish a new
				 * mapping.
				 */
				? nf_nat_setup_info(ct, range, maniptype)
				: nf_nat_alloc_null_binding(ct, hooknum);
			if (err != NF_ACCEPT)
				goto out;
		}
		break;

	case IP_CT_ESTABLISHED:
	case IP_CT_ESTABLISHED_REPLY:
		break;

	default:
		err = NF_DROP;
		goto out;
	}

	err = nf_nat_packet(ct, ctinfo, hooknum, skb);
out:
	return err;
}
#endif /* CONFIG_NF_NAT */

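/* Apply the masked ct mark from the action parameters and, if the mark of
 * a confirmed connection changed, queue an IPCT_MARK event.
 */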
static void tcf_ct_act_set_mark(struct nf_conn *ct, u32 mark, u32 mask)
{
#if IS_ENABLED(CONFIG_NF_CONNTRACK_MARK)
	u32 new_mark;

	if (!mask)
		return;

	new_mark = mark | (ct->mark & ~(mask));
	if (ct->mark != new_mark) {
		ct->mark = new_mark;
		if (nf_ct_is_confirmed(ct))
			nf_conntrack_event_cache(IPCT_MARK, ct);
	}
#endif
}

static void tcf_ct_act_set_labels(struct nf_conn *ct,
				  u32 *labels,
				  u32 *labels_m)
{
#if IS_ENABLED(CONFIG_NF_CONNTRACK_LABELS)
	size_t labels_sz = sizeof_field(struct tcf_ct_params, labels);

	if (!memchr_inv(labels_m, 0, labels_sz))
		return;

	nf_connlabels_replace(ct, labels, labels_m, 4);
#endif
}

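/* Select the NAT manipulation type from the connection state and the
 * configured action, then run it through ct_nat_execute(). Established
 * connections keep their existing binding; the reply direction applies
 * the inverse translation.
 */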
static int tcf_ct_act_nat(struct sk_buff *skb,
			  struct nf_conn *ct,
			  enum ip_conntrack_info ctinfo,
			  int ct_action,
			  struct nf_nat_range2 *range,
			  bool commit)
{
#if IS_ENABLED(CONFIG_NF_NAT)
	int err;
	enum nf_nat_manip_type maniptype;

	if (!(ct_action & TCA_CT_ACT_NAT))
		return NF_ACCEPT;

	/* Add NAT extension if not confirmed yet. */
	if (!nf_ct_is_confirmed(ct) && !nf_ct_nat_ext_add(ct))
		return NF_DROP; /* Can't NAT. */

	if (ctinfo != IP_CT_NEW && (ct->status & IPS_NAT_MASK) &&
	    (ctinfo != IP_CT_RELATED || commit)) {
		/* NAT an established or related connection like before. */
		if (CTINFO2DIR(ctinfo) == IP_CT_DIR_REPLY)
			/* This is the REPLY direction for a connection
			 * for which NAT was applied in the forward
			 * direction. Do the reverse NAT.
			 */
			maniptype = ct->status & IPS_SRC_NAT
				? NF_NAT_MANIP_DST : NF_NAT_MANIP_SRC;
		else
			maniptype = ct->status & IPS_SRC_NAT
				? NF_NAT_MANIP_SRC : NF_NAT_MANIP_DST;
	} else if (ct_action & TCA_CT_ACT_NAT_SRC) {
		maniptype = NF_NAT_MANIP_SRC;
	} else if (ct_action & TCA_CT_ACT_NAT_DST) {
		maniptype = NF_NAT_MANIP_DST;
	} else {
		return NF_ACCEPT;
	}

	err = ct_nat_execute(skb, ct, ctinfo, range, maniptype);
	if (err == NF_ACCEPT && ct->status & IPS_DST_NAT) {
		if (ct->status & IPS_SRC_NAT) {
			if (maniptype == NF_NAT_MANIP_SRC)
				maniptype = NF_NAT_MANIP_DST;
			else
				maniptype = NF_NAT_MANIP_SRC;

			err = ct_nat_execute(skb, ct, ctinfo, range,
					     maniptype);
		} else if (CTINFO2DIR(ctinfo) == IP_CT_DIR_ORIGINAL) {
			err = ct_nat_execute(skb, ct, ctinfo, NULL,
					     NF_NAT_MANIP_SRC);
		}
	}
	return err;
#else
	return NF_ACCEPT;
#endif
}

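/* Main ct action datapath: defragment, look the packet up in conntrack
 * (flow table fast path first), apply NAT, and on commit set mark/labels
 * and confirm the connection. Returns the configured control action, or
 * TC_ACT_SHOT/TC_ACT_STOLEN on drop/defrag-in-progress.
 */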
static int tcf_ct_act(struct sk_buff *skb, const struct tc_action *a,
		      struct tcf_result *res)
{
	struct net *net = dev_net(skb->dev);
	bool cached, commit, clear, force;
	enum ip_conntrack_info ctinfo;
	struct tcf_ct *c = to_ct(a);
	struct nf_conn *tmpl = NULL;
	struct nf_hook_state state;
	int nh_ofs, err, retval;
	struct tcf_ct_params *p;
	bool skip_add = false;
	bool defrag = false;
	struct nf_conn *ct;
	u8 family;

	p = rcu_dereference_bh(c->params);

	retval = READ_ONCE(c->tcf_action);
	commit = p->ct_action & TCA_CT_ACT_COMMIT;
	clear = p->ct_action & TCA_CT_ACT_CLEAR;
	force = p->ct_action & TCA_CT_ACT_FORCE;
	tmpl = p->tmpl;

	tcf_lastuse_update(&c->tcf_tm);
	tcf_action_update_bstats(&c->common, skb);

	if (clear) {
		qdisc_skb_cb(skb)->post_ct = false;
		ct = nf_ct_get(skb, &ctinfo);
		if (ct) {
			nf_conntrack_put(&ct->ct_general);
			nf_ct_set(skb, NULL, IP_CT_UNTRACKED);
		}

		goto out_clear;
	}

	family = tcf_ct_skb_nf_family(skb);
	if (family == NFPROTO_UNSPEC)
		goto drop;

	/* The conntrack module expects to be working at L3.
	 * We also try to pull the IPv4/6 header into the linear area.
	 */
	nh_ofs = skb_network_offset(skb);
	skb_pull_rcsum(skb, nh_ofs);
	err = tcf_ct_handle_fragments(net, skb, family, p->zone, &defrag);
	if (err == -EINPROGRESS) {
		retval = TC_ACT_STOLEN;
		goto out_clear;
	}
	if (err)
		goto drop;

	err = tcf_ct_skb_network_trim(skb, family);
	if (err)
		goto drop;

	/* If we are recirculating packets to match on ct fields and
	 * committing with a separate ct action, then we don't need to
	 * actually run the packet through conntrack twice unless it's for a
	 * different zone.
	 */
	cached = tcf_ct_skb_nfct_cached(net, skb, p->zone, force);
	if (!cached) {
		if (tcf_ct_flow_table_lookup(p, skb, family)) {
			skip_add = true;
			goto do_nat;
		}

		/* Associate skb with specified zone. */
		if (tmpl) {
			nf_conntrack_put(skb_nfct(skb));
			nf_conntrack_get(&tmpl->ct_general);
			nf_ct_set(skb, tmpl, IP_CT_NEW);
		}

		state.hook = NF_INET_PRE_ROUTING;
		state.net = net;
		state.pf = family;
		err = nf_conntrack_in(skb, &state);
		if (err != NF_ACCEPT)
			goto out_push;
	}

do_nat:
	ct = nf_ct_get(skb, &ctinfo);
	if (!ct)
		goto out_push;
	nf_ct_deliver_cached_events(ct);

	err = tcf_ct_act_nat(skb, ct, ctinfo, p->ct_action, &p->range, commit);
	if (err != NF_ACCEPT)
		goto drop;

	if (commit) {
		tcf_ct_act_set_mark(ct, p->mark, p->mark_mask);
		tcf_ct_act_set_labels(ct, p->labels, p->labels_mask);

		/* This will take care of sending queued events
		 * even if the connection is already confirmed.
		 */
		if (nf_conntrack_confirm(skb) != NF_ACCEPT)
			goto drop;
	}

	if (!skip_add)
		tcf_ct_flow_table_process_conn(p->ct_ft, ct, ctinfo);

out_push:
	skb_push_rcsum(skb, nh_ofs);

	qdisc_skb_cb(skb)->post_ct = true;
out_clear:
	if (defrag)
		qdisc_skb_cb(skb)->pkt_len = skb->len;
	return retval;

drop:
	tcf_action_inc_drop_qstats(&c->common);
	return TC_ACT_SHOT;
}

static const struct nla_policy ct_policy[TCA_CT_MAX + 1] = {
	[TCA_CT_ACTION] = { .type = NLA_U16 },
	[TCA_CT_PARMS] = NLA_POLICY_EXACT_LEN(sizeof(struct tc_ct)),
	[TCA_CT_ZONE] = { .type = NLA_U16 },
	[TCA_CT_MARK] = { .type = NLA_U32 },
	[TCA_CT_MARK_MASK] = { .type = NLA_U32 },
	[TCA_CT_LABELS] = { .type = NLA_BINARY,
			    .len = 128 / BITS_PER_BYTE },
	[TCA_CT_LABELS_MASK] = { .type = NLA_BINARY,
				 .len = 128 / BITS_PER_BYTE },
	[TCA_CT_NAT_IPV4_MIN] = { .type = NLA_U32 },
	[TCA_CT_NAT_IPV4_MAX] = { .type = NLA_U32 },
	[TCA_CT_NAT_IPV6_MIN] = NLA_POLICY_EXACT_LEN(sizeof(struct in6_addr)),
	[TCA_CT_NAT_IPV6_MAX] = NLA_POLICY_EXACT_LEN(sizeof(struct in6_addr)),
	[TCA_CT_NAT_PORT_MIN] = { .type = NLA_U16 },
	[TCA_CT_NAT_PORT_MAX] = { .type = NLA_U16 },
};

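/* Parse the NAT netlink attributes into an nf_nat_range2. A missing MAX
 * attribute makes the range a single address/port.
 */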
static int tcf_ct_fill_params_nat(struct tcf_ct_params *p,
				  struct tc_ct *parm,
				  struct nlattr **tb,
				  struct netlink_ext_ack *extack)
{
	struct nf_nat_range2 *range;

	if (!(p->ct_action & TCA_CT_ACT_NAT))
		return 0;

	if (!IS_ENABLED(CONFIG_NF_NAT)) {
		NL_SET_ERR_MSG_MOD(extack, "Netfilter nat isn't enabled in kernel");
		return -EOPNOTSUPP;
	}

	if (!(p->ct_action & (TCA_CT_ACT_NAT_SRC | TCA_CT_ACT_NAT_DST)))
		return 0;

	if ((p->ct_action & TCA_CT_ACT_NAT_SRC) &&
	    (p->ct_action & TCA_CT_ACT_NAT_DST)) {
		NL_SET_ERR_MSG_MOD(extack, "dnat and snat can't be enabled at the same time");
		return -EOPNOTSUPP;
	}

	range = &p->range;
	if (tb[TCA_CT_NAT_IPV4_MIN]) {
		struct nlattr *max_attr = tb[TCA_CT_NAT_IPV4_MAX];

		p->ipv4_range = true;
		range->flags |= NF_NAT_RANGE_MAP_IPS;
		range->min_addr.ip =
			nla_get_in_addr(tb[TCA_CT_NAT_IPV4_MIN]);

		range->max_addr.ip = max_attr ?
				     nla_get_in_addr(max_attr) :
				     range->min_addr.ip;
	} else if (tb[TCA_CT_NAT_IPV6_MIN]) {
		struct nlattr *max_attr = tb[TCA_CT_NAT_IPV6_MAX];

		p->ipv4_range = false;
		range->flags |= NF_NAT_RANGE_MAP_IPS;
		range->min_addr.in6 =
			nla_get_in6_addr(tb[TCA_CT_NAT_IPV6_MIN]);

		range->max_addr.in6 = max_attr ?
				      nla_get_in6_addr(max_attr) :
				      range->min_addr.in6;
	}

	if (tb[TCA_CT_NAT_PORT_MIN]) {
		range->flags |= NF_NAT_RANGE_PROTO_SPECIFIED;
		range->min_proto.all = nla_get_be16(tb[TCA_CT_NAT_PORT_MIN]);

		range->max_proto.all = tb[TCA_CT_NAT_PORT_MAX] ?
				       nla_get_be16(tb[TCA_CT_NAT_PORT_MAX]) :
				       range->min_proto.all;
	}

	return 0;
}

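/* Copy a value (and optional mask) from the netlink attributes; a missing
 * mask attribute means "match/set all bits".
 */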
static void tcf_ct_set_key_val(struct nlattr **tb,
			       void *val, int val_type,
			       void *mask, int mask_type,
			       int len)
{
	if (!tb[val_type])
		return;
	nla_memcpy(val, tb[val_type], len);

	if (!mask)
		return;

	if (mask_type == TCA_CT_UNSPEC || !tb[mask_type])
		memset(mask, 0xff, len);
	else
		nla_memcpy(mask, tb[mask_type], len);
}

static int tcf_ct_fill_params(struct net *net,
			      struct tcf_ct_params *p,
			      struct tc_ct *parm,
			      struct nlattr **tb,
			      struct netlink_ext_ack *extack)
{
	struct tc_ct_action_net *tn = net_generic(net, ct_net_id);
	struct nf_conntrack_zone zone;
	struct nf_conn *tmpl;
	int err;

	p->zone = NF_CT_DEFAULT_ZONE_ID;

	tcf_ct_set_key_val(tb,
			   &p->ct_action, TCA_CT_ACTION,
			   NULL, TCA_CT_UNSPEC,
			   sizeof(p->ct_action));

	if (p->ct_action & TCA_CT_ACT_CLEAR)
		return 0;

	err = tcf_ct_fill_params_nat(p, parm, tb, extack);
	if (err)
		return err;

	if (tb[TCA_CT_MARK]) {
		if (!IS_ENABLED(CONFIG_NF_CONNTRACK_MARK)) {
			NL_SET_ERR_MSG_MOD(extack, "Conntrack mark isn't enabled.");
			return -EOPNOTSUPP;
		}
		tcf_ct_set_key_val(tb,
				   &p->mark, TCA_CT_MARK,
				   &p->mark_mask, TCA_CT_MARK_MASK,
				   sizeof(p->mark));
	}

	if (tb[TCA_CT_LABELS]) {
		if (!IS_ENABLED(CONFIG_NF_CONNTRACK_LABELS)) {
			NL_SET_ERR_MSG_MOD(extack, "Conntrack labels isn't enabled.");
			return -EOPNOTSUPP;
		}

		if (!tn->labels) {
			NL_SET_ERR_MSG_MOD(extack, "Failed to set connlabel length");
			return -EOPNOTSUPP;
		}
		tcf_ct_set_key_val(tb,
				   p->labels, TCA_CT_LABELS,
				   p->labels_mask, TCA_CT_LABELS_MASK,
				   sizeof(p->labels));
	}

	if (tb[TCA_CT_ZONE]) {
		if (!IS_ENABLED(CONFIG_NF_CONNTRACK_ZONES)) {
			NL_SET_ERR_MSG_MOD(extack, "Conntrack zones isn't enabled.");
			return -EOPNOTSUPP;
		}

		tcf_ct_set_key_val(tb,
				   &p->zone, TCA_CT_ZONE,
				   NULL, TCA_CT_UNSPEC,
				   sizeof(p->zone));
	}

	nf_ct_zone_init(&zone, p->zone, NF_CT_DEFAULT_ZONE_DIR, 0);
	tmpl = nf_ct_tmpl_alloc(net, &zone, GFP_KERNEL);
	if (!tmpl) {
		NL_SET_ERR_MSG_MOD(extack, "Failed to allocate conntrack template");
		return -ENOMEM;
	}
	__set_bit(IPS_CONFIRMED_BIT, &tmpl->status);
	nf_conntrack_get(&tmpl->ct_general);
	p->tmpl = tmpl;

	return 0;
}

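/* Netlink init/replace entry point: validate the attributes, create or
 * look up the action instance, build a new parameter block, and swap it
 * in under the action lock; the old block is freed via RCU.
 */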
static int tcf_ct_init(struct net *net, struct nlattr *nla,
		       struct nlattr *est, struct tc_action **a,
		       int replace, int bind, bool rtnl_held,
		       struct tcf_proto *tp, u32 flags,
		       struct netlink_ext_ack *extack)
{
	struct tc_action_net *tn = net_generic(net, ct_net_id);
	struct tcf_ct_params *params = NULL;
	struct nlattr *tb[TCA_CT_MAX + 1];
	struct tcf_chain *goto_ch = NULL;
	struct tc_ct *parm;
	struct tcf_ct *c;
	int err, res = 0;
	u32 index;

	if (!nla) {
		NL_SET_ERR_MSG_MOD(extack, "Ct requires attributes to be passed");
		return -EINVAL;
	}

	err = nla_parse_nested(tb, TCA_CT_MAX, nla, ct_policy, extack);
	if (err < 0)
		return err;

	if (!tb[TCA_CT_PARMS]) {
		NL_SET_ERR_MSG_MOD(extack, "Missing required ct parameters");
		return -EINVAL;
	}
	parm = nla_data(tb[TCA_CT_PARMS]);
	index = parm->index;
	err = tcf_idr_check_alloc(tn, &index, a, bind);
	if (err < 0)
		return err;

	if (!err) {
		err = tcf_idr_create_from_flags(tn, index, est, a,
						&act_ct_ops, bind, flags);
		if (err) {
			tcf_idr_cleanup(tn, index);
			return err;
		}
		res = ACT_P_CREATED;
	} else {
		if (bind)
			return 0;

		if (!replace) {
			tcf_idr_release(*a, bind);
			return -EEXIST;
		}
	}
	err = tcf_action_check_ctrlact(parm->action, tp, &goto_ch, extack);
	if (err < 0)
		goto cleanup;

	c = to_ct(*a);

	params = kzalloc(sizeof(*params), GFP_KERNEL);
	if (unlikely(!params)) {
		err = -ENOMEM;
		goto cleanup;
	}

	err = tcf_ct_fill_params(net, params, parm, tb, extack);
	if (err)
		goto cleanup;

	err = tcf_ct_flow_table_get(params);
	if (err)
		goto cleanup;

	spin_lock_bh(&c->tcf_lock);
	goto_ch = tcf_action_set_ctrlact(*a, parm->action, goto_ch);
	params = rcu_replace_pointer(c->params, params,
				     lockdep_is_held(&c->tcf_lock));
	spin_unlock_bh(&c->tcf_lock);

	if (goto_ch)
		tcf_chain_put_by_act(goto_ch);
	if (params)
		call_rcu(&params->rcu, tcf_ct_params_free);

	return res;

cleanup:
	if (goto_ch)
		tcf_chain_put_by_act(goto_ch);
	kfree(params);
	tcf_idr_release(*a, bind);
	return err;
}

static void tcf_ct_cleanup(struct tc_action *a)
{
	struct tcf_ct_params *params;
	struct tcf_ct *c = to_ct(a);

	params = rcu_dereference_protected(c->params, 1);
	if (params)
		call_rcu(&params->rcu, tcf_ct_params_free);
}

static int tcf_ct_dump_key_val(struct sk_buff *skb,
			       void *val, int val_type,
			       void *mask, int mask_type,
			       int len)
{
	int err;

	if (mask && !memchr_inv(mask, 0, len))
		return 0;

	err = nla_put(skb, val_type, len, val);
	if (err)
		return err;

	if (mask_type != TCA_CT_UNSPEC) {
		err = nla_put(skb, mask_type, len, mask);
		if (err)
			return err;
	}

	return 0;
}

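/* Dump the NAT range back to user space; the inverse of
 * tcf_ct_fill_params_nat().
 */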
static int tcf_ct_dump_nat(struct sk_buff *skb, struct tcf_ct_params *p)
{
	struct nf_nat_range2 *range = &p->range;

	if (!(p->ct_action & TCA_CT_ACT_NAT))
		return 0;

	if (!(p->ct_action & (TCA_CT_ACT_NAT_SRC | TCA_CT_ACT_NAT_DST)))
		return 0;

	if (range->flags & NF_NAT_RANGE_MAP_IPS) {
		if (p->ipv4_range) {
			if (nla_put_in_addr(skb, TCA_CT_NAT_IPV4_MIN,
					    range->min_addr.ip))
				return -1;
			if (nla_put_in_addr(skb, TCA_CT_NAT_IPV4_MAX,
					    range->max_addr.ip))
				return -1;
		} else {
			if (nla_put_in6_addr(skb, TCA_CT_NAT_IPV6_MIN,
					     &range->min_addr.in6))
				return -1;
			if (nla_put_in6_addr(skb, TCA_CT_NAT_IPV6_MAX,
					     &range->max_addr.in6))
				return -1;
		}
	}

	if (range->flags & NF_NAT_RANGE_PROTO_SPECIFIED) {
		if (nla_put_be16(skb, TCA_CT_NAT_PORT_MIN,
				 range->min_proto.all))
			return -1;
		if (nla_put_be16(skb, TCA_CT_NAT_PORT_MAX,
				 range->max_proto.all))
			return -1;
	}

	return 0;
}

static inline int tcf_ct_dump(struct sk_buff *skb, struct tc_action *a,
			      int bind, int ref)
{
	unsigned char *b = skb_tail_pointer(skb);
	struct tcf_ct *c = to_ct(a);
	struct tcf_ct_params *p;

	struct tc_ct opt = {
		.index   = c->tcf_index,
		.refcnt  = refcount_read(&c->tcf_refcnt) - ref,
		.bindcnt = atomic_read(&c->tcf_bindcnt) - bind,
	};
	struct tcf_t t;

	spin_lock_bh(&c->tcf_lock);
	p = rcu_dereference_protected(c->params,
				      lockdep_is_held(&c->tcf_lock));
	opt.action = c->tcf_action;

	if (tcf_ct_dump_key_val(skb,
				&p->ct_action, TCA_CT_ACTION,
				NULL, TCA_CT_UNSPEC,
				sizeof(p->ct_action)))
		goto nla_put_failure;

	if (p->ct_action & TCA_CT_ACT_CLEAR)
		goto skip_dump;

	if (IS_ENABLED(CONFIG_NF_CONNTRACK_MARK) &&
	    tcf_ct_dump_key_val(skb,
				&p->mark, TCA_CT_MARK,
				&p->mark_mask, TCA_CT_MARK_MASK,
				sizeof(p->mark)))
		goto nla_put_failure;

	if (IS_ENABLED(CONFIG_NF_CONNTRACK_LABELS) &&
	    tcf_ct_dump_key_val(skb,
				p->labels, TCA_CT_LABELS,
				p->labels_mask, TCA_CT_LABELS_MASK,
				sizeof(p->labels)))
		goto nla_put_failure;

	if (IS_ENABLED(CONFIG_NF_CONNTRACK_ZONES) &&
	    tcf_ct_dump_key_val(skb,
				&p->zone, TCA_CT_ZONE,
				NULL, TCA_CT_UNSPEC,
				sizeof(p->zone)))
		goto nla_put_failure;

	if (tcf_ct_dump_nat(skb, p))
		goto nla_put_failure;

skip_dump:
	if (nla_put(skb, TCA_CT_PARMS, sizeof(opt), &opt))
		goto nla_put_failure;

	tcf_tm_dump(&t, &c->tcf_tm);
	if (nla_put_64bit(skb, TCA_CT_TM, sizeof(t), &t, TCA_CT_PAD))
		goto nla_put_failure;
	spin_unlock_bh(&c->tcf_lock);

	return skb->len;
nla_put_failure:
	spin_unlock_bh(&c->tcf_lock);
	nlmsg_trim(skb, b);
	return -1;
}

static int tcf_ct_walker(struct net *net, struct sk_buff *skb,
			 struct netlink_callback *cb, int type,
			 const struct tc_action_ops *ops,
			 struct netlink_ext_ack *extack)
{
	struct tc_action_net *tn = net_generic(net, ct_net_id);

	return tcf_generic_walker(tn, skb, cb, type, ops, extack);
}

static int tcf_ct_search(struct net *net, struct tc_action **a, u32 index)
{
	struct tc_action_net *tn = net_generic(net, ct_net_id);

	return tcf_idr_search(tn, a, index);
}

static void tcf_stats_update(struct tc_action *a, u64 bytes, u64 packets,
			     u64 drops, u64 lastuse, bool hw)
{
	struct tcf_ct *c = to_ct(a);

	tcf_action_update_stats(a, bytes, packets, drops, hw);
	c->tcf_tm.lastuse = max_t(u64, c->tcf_tm.lastuse, lastuse);
}

static struct tc_action_ops act_ct_ops = {
	.kind		=	"ct",
	.id		=	TCA_ID_CT,
	.owner		=	THIS_MODULE,
	.act		=	tcf_ct_act,
	.dump		=	tcf_ct_dump,
	.init		=	tcf_ct_init,
	.cleanup	=	tcf_ct_cleanup,
	.walk		=	tcf_ct_walker,
	.lookup		=	tcf_ct_search,
	.stats_update	=	tcf_stats_update,
	.size		=	sizeof(struct tcf_ct),
};

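/* Per-netns setup: reserve the full 128-bit connlabel area so the action
 * can set labels; if that fails, label configuration is rejected later
 * via tn->labels.
 */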
static __net_init int ct_init_net(struct net *net)
{
	unsigned int n_bits = sizeof_field(struct tcf_ct_params, labels) * 8;
	struct tc_ct_action_net *tn = net_generic(net, ct_net_id);

	if (nf_connlabels_get(net, n_bits - 1)) {
		tn->labels = false;
		pr_err("act_ct: Failed to set connlabels length");
	} else {
		tn->labels = true;
	}

	return tc_action_net_init(net, &tn->tn, &act_ct_ops);
}

static void __net_exit ct_exit_net(struct list_head *net_list)
{
	struct net *net;

	rtnl_lock();
	list_for_each_entry(net, net_list, exit_list) {
		struct tc_ct_action_net *tn = net_generic(net, ct_net_id);

		if (tn->labels)
			nf_connlabels_put(net);
	}
	rtnl_unlock();

	tc_action_net_exit(net_list, ct_net_id);
}

static struct pernet_operations ct_net_ops = {
	.init = ct_init_net,
	.exit_batch = ct_exit_net,
	.id   = &ct_net_id,
	.size = sizeof(struct tc_ct_action_net),
};

static int __init ct_init_module(void)
{
	int err;

	act_ct_wq = alloc_ordered_workqueue("act_ct_workqueue", 0);
	if (!act_ct_wq)
		return -ENOMEM;

	err = tcf_ct_flow_tables_init();
	if (err)
		goto err_tbl_init;

	err = tcf_register_action(&act_ct_ops, &ct_net_ops);
	if (err)
		goto err_register;

	static_branch_inc(&tcf_frag_xmit_count);

	return 0;

err_register:
	tcf_ct_flow_tables_uninit();
err_tbl_init:
	destroy_workqueue(act_ct_wq);
	return err;
}

static void __exit ct_cleanup_module(void)
{
	static_branch_dec(&tcf_frag_xmit_count);
	tcf_unregister_action(&act_ct_ops, &ct_net_ops);
	tcf_ct_flow_tables_uninit();
	destroy_workqueue(act_ct_wq);
}

module_init(ct_init_module);
module_exit(ct_cleanup_module);
MODULE_AUTHOR("Paul Blakey <paulb@mellanox.com>");
MODULE_AUTHOR("Yossi Kuperman <yossiku@mellanox.com>");
MODULE_AUTHOR("Marcelo Ricardo Leitner <marcelo.leitner@gmail.com>");
MODULE_DESCRIPTION("Connection tracking action");
MODULE_LICENSE("GPL v2");