// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Checksum updating actions
 *
 * Copyright (c) 2010 Gregoire Baron <baronchon@n7mm.org>
 */

#include <linux/types.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/spinlock.h>

#include <linux/netlink.h>
#include <net/netlink.h>
#include <linux/rtnetlink.h>

#include <linux/skbuff.h>

#include <net/ip.h>
#include <net/ipv6.h>
#include <net/icmp.h>
#include <linux/icmpv6.h>
#include <linux/igmp.h>
#include <net/tcp.h>
#include <net/udp.h>
#include <net/ip6_checksum.h>
#include <net/sctp/checksum.h>

#include <net/act_api.h>
#include <net/pkt_cls.h>

#include <linux/tc_act/tc_csum.h>
#include <net/tc_act/tc_csum.h>

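/*
 * This module implements the tc "csum" action, which recomputes the
 * selected checksums on every packet it sees.  As an illustration only
 * (exact syntax depends on the iproute2 version and the surrounding
 * filter), a rule using it might look like:
 *
 *	tc filter add dev eth0 parent 1: protocol ip u32 match u32 0 0 \
 *		action csum ip and udp
 *
 * where the flag list selects which of the IPv4 header, ICMP, IGMP, TCP,
 * UDP, UDP-Lite or SCTP checksums below are updated.
 */
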
static const struct nla_policy csum_policy[TCA_CSUM_MAX + 1] = {
	[TCA_CSUM_PARMS] = { .len = sizeof(struct tc_csum), },
};

static unsigned int csum_net_id;
static struct tc_action_ops act_csum_ops;

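/* Parse the netlink attributes and create or update one csum action
 * instance.  The update flags live in a separately allocated, RCU-managed
 * tcf_csum_params block, so the packet path can read them without taking
 * the per-action lock.
 */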
static int tcf_csum_init(struct net *net, struct nlattr *nla,
			 struct nlattr *est, struct tc_action **a, int ovr,
			 int bind, bool rtnl_held, struct tcf_proto *tp,
			 u32 flags, struct netlink_ext_ack *extack)
{
	struct tc_action_net *tn = net_generic(net, csum_net_id);
	struct tcf_csum_params *params_new;
	struct nlattr *tb[TCA_CSUM_MAX + 1];
	struct tcf_chain *goto_ch = NULL;
	struct tc_csum *parm;
	struct tcf_csum *p;
	int ret = 0, err;
	u32 index;

	if (nla == NULL)
		return -EINVAL;

	err = nla_parse_nested_deprecated(tb, TCA_CSUM_MAX, nla, csum_policy,
					  NULL);
	if (err < 0)
		return err;

	if (tb[TCA_CSUM_PARMS] == NULL)
		return -EINVAL;
	parm = nla_data(tb[TCA_CSUM_PARMS]);
	index = parm->index;
	err = tcf_idr_check_alloc(tn, &index, a, bind);
	if (!err) {
		ret = tcf_idr_create_from_flags(tn, index, est, a,
						&act_csum_ops, bind, flags);
		if (ret) {
			tcf_idr_cleanup(tn, index);
			return ret;
		}
		ret = ACT_P_CREATED;
	} else if (err > 0) {
		if (bind) /* don't override defaults */
			return 0;
		if (!ovr) {
			tcf_idr_release(*a, bind);
			return -EEXIST;
		}
	} else {
		return err;
	}

	err = tcf_action_check_ctrlact(parm->action, tp, &goto_ch, extack);
	if (err < 0)
		goto release_idr;

	p = to_tcf_csum(*a);

	params_new = kzalloc(sizeof(*params_new), GFP_KERNEL);
	if (unlikely(!params_new)) {
		err = -ENOMEM;
		goto put_chain;
	}
	params_new->update_flags = parm->update_flags;

	spin_lock_bh(&p->tcf_lock);
	goto_ch = tcf_action_set_ctrlact(*a, parm->action, goto_ch);
	params_new = rcu_replace_pointer(p->params, params_new,
					 lockdep_is_held(&p->tcf_lock));
	spin_unlock_bh(&p->tcf_lock);

	if (goto_ch)
		tcf_chain_put_by_act(goto_ch);
	if (params_new)
		kfree_rcu(params_new, rcu);

	return ret;
put_chain:
	if (goto_ch)
		tcf_chain_put_by_act(goto_ch);
release_idr:
	tcf_idr_release(*a, bind);
	return err;
}

/**
 * tcf_csum_skb_nextlayer - Get next layer pointer
 * @skb: sk_buff to use
 * @ihl: length of the headers summed so far
 * @ipl: complete packet length
 * @jhl: next header length
 *
 * Check that the expected next-layer header is available and writable in
 * the specified sk_buff.  Return a pointer to the next layer if it is,
 * NULL otherwise.
 */
static void *tcf_csum_skb_nextlayer(struct sk_buff *skb,
				    unsigned int ihl, unsigned int ipl,
				    unsigned int jhl)
{
	int ntkoff = skb_network_offset(skb);
	int hl = ihl + jhl;

	if (!pskb_may_pull(skb, ipl + ntkoff) || (ipl < hl) ||
	    skb_try_make_writable(skb, hl + ntkoff))
		return NULL;
	else
		return (void *)(skb_network_header(skb) + ihl);
}

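/* The per-protocol helpers below all follow the same pattern: zero the
 * checksum field, sum the transport header plus payload with
 * csum_partial(), fold the result (adding the pseudo-header for
 * TCP/UDP/ICMPv6) back into the packet, and mark the skb CHECKSUM_NONE.
 */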
static int tcf_csum_ipv4_icmp(struct sk_buff *skb, unsigned int ihl,
			      unsigned int ipl)
{
	struct icmphdr *icmph;

	icmph = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*icmph));
	if (icmph == NULL)
		return 0;

	icmph->checksum = 0;
	skb->csum = csum_partial(icmph, ipl - ihl, 0);
	icmph->checksum = csum_fold(skb->csum);

	skb->ip_summed = CHECKSUM_NONE;

	return 1;
}

static int tcf_csum_ipv4_igmp(struct sk_buff *skb,
			      unsigned int ihl, unsigned int ipl)
{
	struct igmphdr *igmph;

	igmph = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*igmph));
	if (igmph == NULL)
		return 0;

	igmph->csum = 0;
	skb->csum = csum_partial(igmph, ipl - ihl, 0);
	igmph->csum = csum_fold(skb->csum);

	skb->ip_summed = CHECKSUM_NONE;

	return 1;
}

static int tcf_csum_ipv6_icmp(struct sk_buff *skb, unsigned int ihl,
			      unsigned int ipl)
{
	struct icmp6hdr *icmp6h;
	const struct ipv6hdr *ip6h;

	icmp6h = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*icmp6h));
	if (icmp6h == NULL)
		return 0;

	ip6h = ipv6_hdr(skb);
	icmp6h->icmp6_cksum = 0;
	skb->csum = csum_partial(icmp6h, ipl - ihl, 0);
	icmp6h->icmp6_cksum = csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
					      ipl - ihl, IPPROTO_ICMPV6,
					      skb->csum);

	skb->ip_summed = CHECKSUM_NONE;

	return 1;
}

static int tcf_csum_ipv4_tcp(struct sk_buff *skb, unsigned int ihl,
			     unsigned int ipl)
{
	struct tcphdr *tcph;
	const struct iphdr *iph;

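	/* GSO packets get their checksum filled in at segmentation time (or
	 * by the NIC); recomputing it here over the unsegmented super-packet
	 * would corrupt it, so leave GSO TCP packets untouched.
	 */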
	if (skb_is_gso(skb) && skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
		return 1;

	tcph = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*tcph));
	if (tcph == NULL)
		return 0;

	iph = ip_hdr(skb);
	tcph->check = 0;
	skb->csum = csum_partial(tcph, ipl - ihl, 0);
	tcph->check = tcp_v4_check(ipl - ihl,
				   iph->saddr, iph->daddr, skb->csum);

	skb->ip_summed = CHECKSUM_NONE;

	return 1;
}

static int tcf_csum_ipv6_tcp(struct sk_buff *skb, unsigned int ihl,
			     unsigned int ipl)
{
	struct tcphdr *tcph;
	const struct ipv6hdr *ip6h;

	if (skb_is_gso(skb) && skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
		return 1;

	tcph = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*tcph));
	if (tcph == NULL)
		return 0;

	ip6h = ipv6_hdr(skb);
	tcph->check = 0;
	skb->csum = csum_partial(tcph, ipl - ihl, 0);
	tcph->check = csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
				      ipl - ihl, IPPROTO_TCP,
				      skb->csum);

	skb->ip_summed = CHECKSUM_NONE;

	return 1;
}

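/* On IPv4, a UDP checksum of zero means "no checksum", so a value that
 * computes to zero is transmitted as CSUM_MANGLED_0 (0xffff), and
 * datagrams that already carry a zero checksum are left as-is.  UDP-Lite
 * checksums are mandatory and are always rewritten.
 */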
static int tcf_csum_ipv4_udp(struct sk_buff *skb, unsigned int ihl,
			     unsigned int ipl, int udplite)
{
	struct udphdr *udph;
	const struct iphdr *iph;
	u16 ul;

	if (skb_is_gso(skb) && skb_shinfo(skb)->gso_type & SKB_GSO_UDP)
		return 1;

	/*
	 * Support both UDP and UDP-Lite checksums.  Don't use udph->len as
	 * the datagram length without checking the protocol first: UDP-Lite
	 * reuses that field as the checksum coverage.  Use iph->tot_len
	 * (i.e. ipl) for the real length instead.
	 */

	udph = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*udph));
	if (udph == NULL)
		return 0;

	iph = ip_hdr(skb);
	ul = ntohs(udph->len);

	if (udplite || udph->check) {

		udph->check = 0;

		if (udplite) {
			if (ul == 0)
				skb->csum = csum_partial(udph, ipl - ihl, 0);
			else if ((ul >= sizeof(*udph)) && (ul <= ipl - ihl))
				skb->csum = csum_partial(udph, ul, 0);
			else
				goto ignore_obscure_skb;
		} else {
			if (ul != ipl - ihl)
				goto ignore_obscure_skb;

			skb->csum = csum_partial(udph, ul, 0);
		}

		udph->check = csum_tcpudp_magic(iph->saddr, iph->daddr,
						ul, iph->protocol,
						skb->csum);

		if (!udph->check)
			udph->check = CSUM_MANGLED_0;
	}

	skb->ip_summed = CHECKSUM_NONE;

ignore_obscure_skb:
	return 1;
}

static int tcf_csum_ipv6_udp(struct sk_buff *skb, unsigned int ihl,
			     unsigned int ipl, int udplite)
{
	struct udphdr *udph;
	const struct ipv6hdr *ip6h;
	u16 ul;

	if (skb_is_gso(skb) && skb_shinfo(skb)->gso_type & SKB_GSO_UDP)
		return 1;

	/*
	 * Support both UDP and UDP-Lite checksums.  Don't use udph->len as
	 * the datagram length without checking the protocol first: UDP-Lite
	 * reuses that field as the checksum coverage.  Use
	 * ip6h->payload_len + sizeof(*ip6h) (i.e. ipl) for the real length
	 * instead.
	 */

	udph = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*udph));
	if (udph == NULL)
		return 0;

	ip6h = ipv6_hdr(skb);
	ul = ntohs(udph->len);

	udph->check = 0;

	if (udplite) {
		if (ul == 0)
			skb->csum = csum_partial(udph, ipl - ihl, 0);

		else if ((ul >= sizeof(*udph)) && (ul <= ipl - ihl))
			skb->csum = csum_partial(udph, ul, 0);

		else
			goto ignore_obscure_skb;
	} else {
		if (ul != ipl - ihl)
			goto ignore_obscure_skb;

		skb->csum = csum_partial(udph, ul, 0);
	}

	udph->check = csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr, ul,
				      udplite ? IPPROTO_UDPLITE : IPPROTO_UDP,
				      skb->csum);

	if (!udph->check)
		udph->check = CSUM_MANGLED_0;

	skb->ip_summed = CHECKSUM_NONE;

ignore_obscure_skb:
	return 1;
}

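/* SCTP protects the packet with a CRC32c rather than the Internet
 * checksum.  sctp_compute_cksum() fills it in in software, after which the
 * csum_not_inet flag (a request for CRC32c offload) can be cleared.
 */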
static int tcf_csum_sctp(struct sk_buff *skb, unsigned int ihl,
			 unsigned int ipl)
{
	struct sctphdr *sctph;

	if (skb_is_gso(skb) && skb_is_gso_sctp(skb))
		return 1;

	sctph = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*sctph));
	if (!sctph)
		return 0;

	sctph->checksum = sctp_compute_cksum(skb,
					     skb_network_offset(skb) + ihl);
	skb->ip_summed = CHECKSUM_NONE;
	skb->csum_not_inet = 0;

	return 1;
}

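/* Recompute the checksums selected by update_flags for an IPv4 packet.
 * Non-first fragments carry no transport header, so the frag_off test maps
 * them to protocol 0 and they fall through the switch untouched; the IPv4
 * header checksum itself is refreshed last when requested.
 */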
static int tcf_csum_ipv4(struct sk_buff *skb, u32 update_flags)
{
	const struct iphdr *iph;
	int ntkoff;

	ntkoff = skb_network_offset(skb);

	if (!pskb_may_pull(skb, sizeof(*iph) + ntkoff))
		goto fail;

	iph = ip_hdr(skb);

	switch (iph->frag_off & htons(IP_OFFSET) ? 0 : iph->protocol) {
	case IPPROTO_ICMP:
		if (update_flags & TCA_CSUM_UPDATE_FLAG_ICMP)
			if (!tcf_csum_ipv4_icmp(skb, iph->ihl * 4,
						ntohs(iph->tot_len)))
				goto fail;
		break;
	case IPPROTO_IGMP:
		if (update_flags & TCA_CSUM_UPDATE_FLAG_IGMP)
			if (!tcf_csum_ipv4_igmp(skb, iph->ihl * 4,
						ntohs(iph->tot_len)))
				goto fail;
		break;
	case IPPROTO_TCP:
		if (update_flags & TCA_CSUM_UPDATE_FLAG_TCP)
			if (!tcf_csum_ipv4_tcp(skb, iph->ihl * 4,
					       ntohs(iph->tot_len)))
				goto fail;
		break;
	case IPPROTO_UDP:
		if (update_flags & TCA_CSUM_UPDATE_FLAG_UDP)
			if (!tcf_csum_ipv4_udp(skb, iph->ihl * 4,
					       ntohs(iph->tot_len), 0))
				goto fail;
		break;
	case IPPROTO_UDPLITE:
		if (update_flags & TCA_CSUM_UPDATE_FLAG_UDPLITE)
			if (!tcf_csum_ipv4_udp(skb, iph->ihl * 4,
					       ntohs(iph->tot_len), 1))
				goto fail;
		break;
	case IPPROTO_SCTP:
		if ((update_flags & TCA_CSUM_UPDATE_FLAG_SCTP) &&
		    !tcf_csum_sctp(skb, iph->ihl * 4, ntohs(iph->tot_len)))
			goto fail;
		break;
	}

	if (update_flags & TCA_CSUM_UPDATE_FLAG_IPV4HDR) {
		if (skb_try_make_writable(skb, sizeof(*iph) + ntkoff))
			goto fail;

		ip_send_check(ip_hdr(skb));
	}

	return 1;

fail:
	return 0;
}

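/* Scan a hop-by-hop options header for a Jumbo Payload option.  If one is
 * found, *pl is overwritten with the 32-bit jumbogram length, since the
 * fixed header's payload_len field is zero for jumbograms.
 */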
static int tcf_csum_ipv6_hopopts(struct ipv6_opt_hdr *ip6xh, unsigned int ixhl,
				 unsigned int *pl)
{
	int off, len, optlen;
	unsigned char *xh = (void *)ip6xh;

	off = sizeof(*ip6xh);
	len = ixhl - off;

	while (len > 1) {
		switch (xh[off]) {
		case IPV6_TLV_PAD1:
			optlen = 1;
			break;
		case IPV6_TLV_JUMBO:
			optlen = xh[off + 1] + 2;
			if (optlen != 6 || len < 6 || (off & 3) != 2)
				/* wrong jumbo option length/alignment */
				return 0;
			*pl = ntohl(*(__be32 *)(xh + off + 2));
			goto done;
		default:
			optlen = xh[off + 1] + 2;
			if (optlen > len)
				/* ignore obscure options */
				goto done;
			break;
		}
		off += optlen;
		len -= optlen;
	}

done:
	return 1;
}

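/* Walk the IPv6 extension-header chain until a transport protocol is
 * reached, then dispatch on update_flags as in the IPv4 case.  Fragmented
 * packets are skipped entirely, since their transport checksum covers data
 * spread over several fragments.
 */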
static int tcf_csum_ipv6(struct sk_buff *skb, u32 update_flags)
{
	struct ipv6hdr *ip6h;
	struct ipv6_opt_hdr *ip6xh;
	unsigned int hl, ixhl;
	unsigned int pl;
	int ntkoff;
	u8 nexthdr;

	ntkoff = skb_network_offset(skb);

	hl = sizeof(*ip6h);

	if (!pskb_may_pull(skb, hl + ntkoff))
		goto fail;

	ip6h = ipv6_hdr(skb);

	pl = ntohs(ip6h->payload_len);
	nexthdr = ip6h->nexthdr;

	do {
		switch (nexthdr) {
		case NEXTHDR_FRAGMENT:
			goto ignore_skb;
		case NEXTHDR_ROUTING:
		case NEXTHDR_HOP:
		case NEXTHDR_DEST:
			if (!pskb_may_pull(skb, hl + sizeof(*ip6xh) + ntkoff))
				goto fail;
			ip6xh = (void *)(skb_network_header(skb) + hl);
			ixhl = ipv6_optlen(ip6xh);
			if (!pskb_may_pull(skb, hl + ixhl + ntkoff))
				goto fail;
			ip6xh = (void *)(skb_network_header(skb) + hl);
			if ((nexthdr == NEXTHDR_HOP) &&
			    !(tcf_csum_ipv6_hopopts(ip6xh, ixhl, &pl)))
				goto fail;
			nexthdr = ip6xh->nexthdr;
			hl += ixhl;
			break;
		case IPPROTO_ICMPV6:
			if (update_flags & TCA_CSUM_UPDATE_FLAG_ICMP)
				if (!tcf_csum_ipv6_icmp(skb,
							hl, pl + sizeof(*ip6h)))
					goto fail;
			goto done;
		case IPPROTO_TCP:
			if (update_flags & TCA_CSUM_UPDATE_FLAG_TCP)
				if (!tcf_csum_ipv6_tcp(skb,
						       hl, pl + sizeof(*ip6h)))
					goto fail;
			goto done;
		case IPPROTO_UDP:
			if (update_flags & TCA_CSUM_UPDATE_FLAG_UDP)
				if (!tcf_csum_ipv6_udp(skb, hl,
						       pl + sizeof(*ip6h), 0))
					goto fail;
			goto done;
		case IPPROTO_UDPLITE:
			if (update_flags & TCA_CSUM_UPDATE_FLAG_UDPLITE)
				if (!tcf_csum_ipv6_udp(skb, hl,
						       pl + sizeof(*ip6h), 1))
					goto fail;
			goto done;
		case IPPROTO_SCTP:
			if ((update_flags & TCA_CSUM_UPDATE_FLAG_SCTP) &&
			    !tcf_csum_sctp(skb, hl, pl + sizeof(*ip6h)))
				goto fail;
			goto done;
		default:
			goto ignore_skb;
		}
	} while (pskb_may_pull(skb, hl + 1 + ntkoff));

done:
ignore_skb:
	return 1;

fail:
	return 0;
}

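/* Packet-path entry point.  Action parameters are read under RCU, so no
 * per-packet lock is taken.  VLAN headers still present in the packet data
 * (tags not carried in the skb metadata) are pulled so that the network
 * header lines up with the IP header, and are pushed back before the
 * packet is handed on.
 */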
static int tcf_csum_act(struct sk_buff *skb, const struct tc_action *a,
			struct tcf_result *res)
{
	struct tcf_csum *p = to_tcf_csum(a);
	bool orig_vlan_tag_present = false;
	unsigned int vlan_hdr_count = 0;
	struct tcf_csum_params *params;
	u32 update_flags;
	__be16 protocol;
	int action;

	params = rcu_dereference_bh(p->params);

	tcf_lastuse_update(&p->tcf_tm);
	tcf_action_update_bstats(&p->common, skb);

	action = READ_ONCE(p->tcf_action);
	if (unlikely(action == TC_ACT_SHOT))
		goto drop;

	update_flags = params->update_flags;
	protocol = skb_protocol(skb, false);
again:
	switch (protocol) {
	case cpu_to_be16(ETH_P_IP):
		if (!tcf_csum_ipv4(skb, update_flags))
			goto drop;
		break;
	case cpu_to_be16(ETH_P_IPV6):
		if (!tcf_csum_ipv6(skb, update_flags))
			goto drop;
		break;
	case cpu_to_be16(ETH_P_8021AD):
		fallthrough;
	case cpu_to_be16(ETH_P_8021Q):
		if (skb_vlan_tag_present(skb) && !orig_vlan_tag_present) {
			protocol = skb->protocol;
			orig_vlan_tag_present = true;
		} else {
			struct vlan_hdr *vlan = (struct vlan_hdr *)skb->data;

			protocol = vlan->h_vlan_encapsulated_proto;
			skb_pull(skb, VLAN_HLEN);
			skb_reset_network_header(skb);
			vlan_hdr_count++;
		}
		goto again;
	}

out:
	/* Restore the skb for the pulled VLAN tags */
	while (vlan_hdr_count--) {
		skb_push(skb, VLAN_HLEN);
		skb_reset_network_header(skb);
	}

	return action;

drop:
	tcf_action_inc_drop_qstats(&p->common);
	action = TC_ACT_SHOT;
	goto out;
}

static int tcf_csum_dump(struct sk_buff *skb, struct tc_action *a, int bind,
			 int ref)
{
	unsigned char *b = skb_tail_pointer(skb);
	struct tcf_csum *p = to_tcf_csum(a);
	struct tcf_csum_params *params;
	struct tc_csum opt = {
		.index = p->tcf_index,
		.refcnt = refcount_read(&p->tcf_refcnt) - ref,
		.bindcnt = atomic_read(&p->tcf_bindcnt) - bind,
	};
	struct tcf_t t;

	spin_lock_bh(&p->tcf_lock);
	params = rcu_dereference_protected(p->params,
					   lockdep_is_held(&p->tcf_lock));
	opt.action = p->tcf_action;
	opt.update_flags = params->update_flags;

	if (nla_put(skb, TCA_CSUM_PARMS, sizeof(opt), &opt))
		goto nla_put_failure;

	tcf_tm_dump(&t, &p->tcf_tm);
	if (nla_put_64bit(skb, TCA_CSUM_TM, sizeof(t), &t, TCA_CSUM_PAD))
		goto nla_put_failure;
	spin_unlock_bh(&p->tcf_lock);

	return skb->len;

nla_put_failure:
	spin_unlock_bh(&p->tcf_lock);
	nlmsg_trim(skb, b);
	return -1;
}

static void tcf_csum_cleanup(struct tc_action *a)
{
	struct tcf_csum *p = to_tcf_csum(a);
	struct tcf_csum_params *params;

	params = rcu_dereference_protected(p->params, 1);
	if (params)
		kfree_rcu(params, rcu);
}

static int tcf_csum_walker(struct net *net, struct sk_buff *skb,
			   struct netlink_callback *cb, int type,
			   const struct tc_action_ops *ops,
			   struct netlink_ext_ack *extack)
{
	struct tc_action_net *tn = net_generic(net, csum_net_id);

	return tcf_generic_walker(tn, skb, cb, type, ops, extack);
}

static int tcf_csum_search(struct net *net, struct tc_action **a, u32 index)
{
	struct tc_action_net *tn = net_generic(net, csum_net_id);

	return tcf_idr_search(tn, a, index);
}

static size_t tcf_csum_get_fill_size(const struct tc_action *act)
{
	return nla_total_size(sizeof(struct tc_csum));
}

static struct tc_action_ops act_csum_ops = {
	.kind = "csum",
	.id = TCA_ID_CSUM,
	.owner = THIS_MODULE,
	.act = tcf_csum_act,
	.dump = tcf_csum_dump,
	.init = tcf_csum_init,
	.cleanup = tcf_csum_cleanup,
	.walk = tcf_csum_walker,
	.lookup = tcf_csum_search,
	.get_fill_size = tcf_csum_get_fill_size,
	.size = sizeof(struct tcf_csum),
};

static __net_init int csum_init_net(struct net *net)
{
	struct tc_action_net *tn = net_generic(net, csum_net_id);

	return tc_action_net_init(net, tn, &act_csum_ops);
}

static void __net_exit csum_exit_net(struct list_head *net_list)
{
	tc_action_net_exit(net_list, csum_net_id);
}

static struct pernet_operations csum_net_ops = {
	.init = csum_init_net,
	.exit_batch = csum_exit_net,
	.id = &csum_net_id,
	.size = sizeof(struct tc_action_net),
};

MODULE_DESCRIPTION("Checksum updating actions");
MODULE_LICENSE("GPL");

static int __init csum_init_module(void)
{
	return tcf_register_action(&act_csum_ops, &csum_net_ops);
}

static void __exit csum_cleanup_module(void)
{
	tcf_unregister_action(&act_csum_ops, &csum_net_ops);
}

module_init(csum_init_module);
module_exit(csum_cleanup_module);