1/*
2 * IPv6 fragment reassembly
3 * Linux INET6 implementation
4 *
5 * Authors:
6 * Pedro Roque <roque@di.fc.ul.pt>
7 *
8 * Based on: net/ipv4/ip_fragment.c
9 *
10 * This program is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU General Public License
12 * as published by the Free Software Foundation; either version
13 * 2 of the License, or (at your option) any later version.
14 */
15
16/*
17 * Fixes:
18 * Andi Kleen Make it work with multiple hosts.
19 * More RFC compliance.
20 *
21 * Horst von Brand Add missing #include <linux/string.h>
22 * Alexey Kuznetsov SMP races, threading, cleanup.
23 * Patrick McHardy LRU queue of frag heads for evictor.
24 * Mitsuru KANDA @USAGI Register inet6_protocol{}.
25 * David Stevens and
26 * YOSHIFUJI,H. @USAGI Always remove fragment header to
27 * calculate ICV correctly.
28 */
29#include <linux/errno.h>
30#include <linux/types.h>
31#include <linux/string.h>
32#include <linux/socket.h>
33#include <linux/sockios.h>
34#include <linux/jiffies.h>
35#include <linux/net.h>
36#include <linux/list.h>
37#include <linux/netdevice.h>
38#include <linux/in6.h>
39#include <linux/ipv6.h>
40#include <linux/icmpv6.h>
41#include <linux/random.h>
42#include <linux/jhash.h>
43#include <linux/skbuff.h>
44#include <linux/slab.h>
45
46#include <net/sock.h>
47#include <net/snmp.h>
48
49#include <net/ipv6.h>
50#include <net/ip6_route.h>
51#include <net/protocol.h>
52#include <net/transp_v6.h>
53#include <net/rawv6.h>
54#include <net/ndisc.h>
55#include <net/addrconf.h>
56#include <net/inet_frag.h>
57
58struct ip6frag_skb_cb
59{
60 struct inet6_skb_parm h;
61 int offset;
62};
63
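/* Per-fragment reassembly state lives in skb->cb: each queued fragment
 * remembers its byte offset within the original datagram (see FRAG6_CB).
 */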
64#define FRAG6_CB(skb) ((struct ip6frag_skb_cb*)((skb)->cb))
65
66
67/*
68 * Equivalent of ipv4 struct ipq
69 */
70
71struct frag_queue
72{
73 struct inet_frag_queue q;
74
75 __be32 id; /* fragment id */
76 u32 user;
77 struct in6_addr saddr;
78 struct in6_addr daddr;
79
80 int iif;
81 unsigned int csum;
82 __u16 nhoffset;
83};
84
85static struct inet_frags ip6_frags;
86
87int ip6_frag_nqueues(struct net *net)
88{
89 return net->ipv6.frags.nqueues;
90}
91
92int ip6_frag_mem(struct net *net)
93{
94 return atomic_read(&net->ipv6.frags.mem);
95}
96
97static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev,
98 struct net_device *dev);
99
100/*
101 * Callers should be careful not to use the hash value outside ip6_frags.lock,
102 * as doing so could race with ip6_frags.rnd being recalculated.
103 */
104unsigned int inet6_hash_frag(__be32 id, const struct in6_addr *saddr,
105 const struct in6_addr *daddr, u32 rnd)
106{
107 u32 c;
108
109 c = jhash_3words((__force u32)saddr->s6_addr32[0],
110 (__force u32)saddr->s6_addr32[1],
111 (__force u32)saddr->s6_addr32[2],
112 rnd);
113
114 c = jhash_3words((__force u32)saddr->s6_addr32[3],
115 (__force u32)daddr->s6_addr32[0],
116 (__force u32)daddr->s6_addr32[1],
117 c);
118
119 c = jhash_3words((__force u32)daddr->s6_addr32[2],
120 (__force u32)daddr->s6_addr32[3],
121 (__force u32)id,
122 c);
123
124 return c & (INETFRAGS_HASHSZ - 1);
125}
126EXPORT_SYMBOL_GPL(inet6_hash_frag);
127
128static unsigned int ip6_hashfn(struct inet_frag_queue *q)
129{
130 struct frag_queue *fq;
131
132 fq = container_of(q, struct frag_queue, q);
133 return inet6_hash_frag(fq->id, &fq->saddr, &fq->daddr, ip6_frags.rnd);
134}
135
136int ip6_frag_match(struct inet_frag_queue *q, void *a)
137{
138 struct frag_queue *fq;
139 struct ip6_create_arg *arg = a;
140
141 fq = container_of(q, struct frag_queue, q);
142 return (fq->id == arg->id && fq->user == arg->user &&
143 ipv6_addr_equal(&fq->saddr, arg->src) &&
144 ipv6_addr_equal(&fq->daddr, arg->dst));
145}
146EXPORT_SYMBOL(ip6_frag_match);
147
148void ip6_frag_init(struct inet_frag_queue *q, void *a)
149{
150 struct frag_queue *fq = container_of(q, struct frag_queue, q);
151 struct ip6_create_arg *arg = a;
152
153 fq->id = arg->id;
154 fq->user = arg->user;
155 ipv6_addr_copy(&fq->saddr, arg->src);
156 ipv6_addr_copy(&fq->daddr, arg->dst);
157}
158EXPORT_SYMBOL(ip6_frag_init);
159
160/* Destruction primitives. */
161
162static __inline__ void fq_put(struct frag_queue *fq)
163{
164 inet_frag_put(&fq->q, &ip6_frags);
165}
166
167/* Kill fq entry. It is not destroyed immediately,
168 * because the caller (and possibly others) still holds a reference.
169 */
170static __inline__ void fq_kill(struct frag_queue *fq)
171{
172 inet_frag_kill(&fq->q, &ip6_frags);
173}
174
175static void ip6_evictor(struct net *net, struct inet6_dev *idev)
176{
177 int evicted;
178
179 evicted = inet_frag_evictor(&net->ipv6.frags, &ip6_frags);
180 if (evicted)
181 IP6_ADD_STATS_BH(net, idev, IPSTATS_MIB_REASMFAILS, evicted);
182}
183
184static void ip6_frag_expire(unsigned long data)
185{
186 struct frag_queue *fq;
187 struct net_device *dev = NULL;
188 struct net *net;
189
190 fq = container_of((struct inet_frag_queue *)data, struct frag_queue, q);
191
192 spin_lock(&fq->q.lock);
193
194 if (fq->q.last_in & INET_FRAG_COMPLETE)
195 goto out;
196
197 fq_kill(fq);
198
199 net = container_of(fq->q.net, struct net, ipv6.frags);
200 rcu_read_lock();
201 dev = dev_get_by_index_rcu(net, fq->iif);
202 if (!dev)
203 goto out_rcu_unlock;
204
205 IP6_INC_STATS_BH(net, __in6_dev_get(dev), IPSTATS_MIB_REASMTIMEOUT);
206 IP6_INC_STATS_BH(net, __in6_dev_get(dev), IPSTATS_MIB_REASMFAILS);
207
208 /* Don't send error if the first segment did not arrive. */
209 if (!(fq->q.last_in & INET_FRAG_FIRST_IN) || !fq->q.fragments)
210 goto out_rcu_unlock;
211
212	/*
213	 * But use as source the device on which the LAST ARRIVED
214	 * segment was received. And do not use the fq->dev
215	 * pointer directly, the device might have already disappeared.
216	 */
217 fq->q.fragments->dev = dev;
218 icmpv6_send(fq->q.fragments, ICMPV6_TIME_EXCEED, ICMPV6_EXC_FRAGTIME, 0);
219out_rcu_unlock:
220 rcu_read_unlock();
221out:
222 spin_unlock(&fq->q.lock);
223 fq_put(fq);
224}
225
226static __inline__ struct frag_queue *
227fq_find(struct net *net, __be32 id, const struct in6_addr *src, const struct in6_addr *dst)
228{
229 struct inet_frag_queue *q;
230 struct ip6_create_arg arg;
231 unsigned int hash;
232
233 arg.id = id;
234 arg.user = IP6_DEFRAG_LOCAL_DELIVER;
235 arg.src = src;
236 arg.dst = dst;
237
238 read_lock(&ip6_frags.lock);
239 hash = inet6_hash_frag(id, src, dst, ip6_frags.rnd);
240
241 q = inet_frag_find(&net->ipv6.frags, &ip6_frags, &arg, hash);
242 if (q == NULL)
243 return NULL;
244
245 return container_of(q, struct frag_queue, q);
246}
247
248static int ip6_frag_queue(struct frag_queue *fq, struct sk_buff *skb,
249 struct frag_hdr *fhdr, int nhoff)
250{
251 struct sk_buff *prev, *next;
252 struct net_device *dev;
253 int offset, end;
254 struct net *net = dev_net(skb_dst(skb)->dev);
255
256 if (fq->q.last_in & INET_FRAG_COMPLETE)
257 goto err;
258
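	/* The Fragment Offset field sits in the upper 13 bits of frag_off
	 * and counts 8-octet units, so masking off the low three flag and
	 * reserved bits of the host-order value yields the byte offset
	 * directly.  "end" is the byte offset just past this fragment's
	 * payload within the original datagram.
	 */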
259 offset = ntohs(fhdr->frag_off) & ~0x7;
260 end = offset + (ntohs(ipv6_hdr(skb)->payload_len) -
261 ((u8 *)(fhdr + 1) - (u8 *)(ipv6_hdr(skb) + 1)));
262
263 if ((unsigned int)end > IPV6_MAXPLEN) {
264 IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)),
265 IPSTATS_MIB_INHDRERRORS);
266 icmpv6_param_prob(skb, ICMPV6_HDR_FIELD,
267 ((u8 *)&fhdr->frag_off -
268 skb_network_header(skb)));
269 return -1;
270 }
271
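	/* For CHECKSUM_COMPLETE, remove the contribution of everything from
	 * the network header through the fragment header, since only the
	 * payload behind the fragment header is queued for reassembly.
	 */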
272 if (skb->ip_summed == CHECKSUM_COMPLETE) {
273 const unsigned char *nh = skb_network_header(skb);
274 skb->csum = csum_sub(skb->csum,
275 csum_partial(nh, (u8 *)(fhdr + 1) - nh,
276 0));
277 }
278
279 /* Is this the final fragment? */
280 if (!(fhdr->frag_off & htons(IP6_MF))) {
281 /* If we already have some bits beyond end
282 * or have different end, the segment is corrupted.
283 */
284 if (end < fq->q.len ||
285 ((fq->q.last_in & INET_FRAG_LAST_IN) && end != fq->q.len))
286 goto err;
287 fq->q.last_in |= INET_FRAG_LAST_IN;
288 fq->q.len = end;
289 } else {
290 /* Check if the fragment is rounded to 8 bytes.
291 * Required by the RFC.
292 */
293 if (end & 0x7) {
294 /* RFC2460 says always send parameter problem in
295 * this case. -DaveM
296 */
297 IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)),
298 IPSTATS_MIB_INHDRERRORS);
299 icmpv6_param_prob(skb, ICMPV6_HDR_FIELD,
300 offsetof(struct ipv6hdr, payload_len));
301 return -1;
302 }
303 if (end > fq->q.len) {
304 /* Some bits beyond end -> corruption. */
305 if (fq->q.last_in & INET_FRAG_LAST_IN)
306 goto err;
307 fq->q.len = end;
308 }
309 }
310
311 if (end == offset)
312 goto err;
313
314 /* Point into the IP datagram 'data' part. */
315 if (!pskb_pull(skb, (u8 *) (fhdr + 1) - skb->data))
316 goto err;
317
318 if (pskb_trim_rcsum(skb, end - offset))
319 goto err;
320
321 /* Find out which fragments are in front and at the back of us
322 * in the chain of fragments so far. We must know where to put
323 * this fragment, right?
324 */
325 prev = fq->q.fragments_tail;
326 if (!prev || FRAG6_CB(prev)->offset < offset) {
327 next = NULL;
328 goto found;
329 }
330 prev = NULL;
331 for(next = fq->q.fragments; next != NULL; next = next->next) {
332 if (FRAG6_CB(next)->offset >= offset)
333 break; /* bingo! */
334 prev = next;
335 }
336
337found:
338 /* RFC5722, Section 4:
339 * When reassembling an IPv6 datagram, if
340 * one or more of its constituent fragments is determined to be an
341 * overlapping fragment, the entire datagram (and any constituent
342 * fragments, including those not yet received) MUST be silently
343 * discarded.
344 */
345
346 /* Check for overlap with preceding fragment. */
347 if (prev &&
348 (FRAG6_CB(prev)->offset + prev->len) > offset)
349 goto discard_fq;
350
351 /* Look for overlap with succeeding segment. */
352 if (next && FRAG6_CB(next)->offset < end)
353 goto discard_fq;
354
355 FRAG6_CB(skb)->offset = offset;
356
357 /* Insert this fragment in the chain of fragments. */
358 skb->next = next;
359 if (!next)
360 fq->q.fragments_tail = skb;
361 if (prev)
362 prev->next = skb;
363 else
364 fq->q.fragments = skb;
365
366 dev = skb->dev;
367 if (dev) {
368 fq->iif = dev->ifindex;
369 skb->dev = NULL;
370 }
371 fq->q.stamp = skb->tstamp;
372 fq->q.meat += skb->len;
373 atomic_add(skb->truesize, &fq->q.net->mem);
374
375 /* The first fragment.
376 * nhoffset is obtained from the first fragment, of course.
377 */
378 if (offset == 0) {
379 fq->nhoffset = nhoff;
380 fq->q.last_in |= INET_FRAG_FIRST_IN;
381 }
382
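	/* Reassemble once both the first and the last fragment have been
	 * seen and the accumulated payload (meat) covers the whole length.
	 */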
383 if (fq->q.last_in == (INET_FRAG_FIRST_IN | INET_FRAG_LAST_IN) &&
384 fq->q.meat == fq->q.len)
385 return ip6_frag_reasm(fq, prev, dev);
386
387 write_lock(&ip6_frags.lock);
388 list_move_tail(&fq->q.lru_list, &fq->q.net->lru_list);
389 write_unlock(&ip6_frags.lock);
390 return -1;
391
392discard_fq:
393 fq_kill(fq);
394err:
395 IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
396 IPSTATS_MIB_REASMFAILS);
397 kfree_skb(skb);
398 return -1;
399}
400
401/*
402 * Check if this packet is complete.
403 * Returns -1 on failure for any reason, and 1 on success, with IP6CB
404 * nhoff holding the offset of the nexthdr field in the reassembled frame.
405 *
406 * It is called with locked fq, and caller must check that
407 * queue is eligible for reassembly i.e. it is not COMPLETE,
408 * the last and the first frames arrived and all the bits are here.
409 */
410static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev,
411 struct net_device *dev)
412{
413 struct net *net = container_of(fq->q.net, struct net, ipv6.frags);
414 struct sk_buff *fp, *head = fq->q.fragments;
415 int payload_len;
416 unsigned int nhoff;
417
418 fq_kill(fq);
419
420 /* Make the one we just received the head. */
421 if (prev) {
422 head = prev->next;
423 fp = skb_clone(head, GFP_ATOMIC);
424
425 if (!fp)
426 goto out_oom;
427
428 fp->next = head->next;
429 if (!fp->next)
430 fq->q.fragments_tail = fp;
431 prev->next = fp;
432
433 skb_morph(head, fq->q.fragments);
434 head->next = fq->q.fragments->next;
435
436 kfree_skb(fq->q.fragments);
437 fq->q.fragments = head;
438 }
439
440 WARN_ON(head == NULL);
441 WARN_ON(FRAG6_CB(head)->offset != 0);
442
443 /* Unfragmented part is taken from the first segment. */
444 payload_len = ((head->data - skb_network_header(head)) -
445 sizeof(struct ipv6hdr) + fq->q.len -
446 sizeof(struct frag_hdr));
447 if (payload_len > IPV6_MAXPLEN)
448 goto out_oversize;
449
450 /* Head of list must not be cloned. */
451 if (skb_cloned(head) && pskb_expand_head(head, 0, 0, GFP_ATOMIC))
452 goto out_oom;
453
454 /* If the first fragment is fragmented itself, we split
455 * it to two chunks: the first with data and paged part
456 * and the second, holding only fragments. */
457 if (skb_has_frag_list(head)) {
458 struct sk_buff *clone;
459 int i, plen = 0;
460
461 if ((clone = alloc_skb(0, GFP_ATOMIC)) == NULL)
462 goto out_oom;
463 clone->next = head->next;
464 head->next = clone;
465 skb_shinfo(clone)->frag_list = skb_shinfo(head)->frag_list;
466 skb_frag_list_init(head);
467 for (i=0; i<skb_shinfo(head)->nr_frags; i++)
468 plen += skb_shinfo(head)->frags[i].size;
469 clone->len = clone->data_len = head->data_len - plen;
470 head->data_len -= clone->len;
471 head->len -= clone->len;
472 clone->csum = 0;
473 clone->ip_summed = head->ip_summed;
474 atomic_add(clone->truesize, &fq->q.net->mem);
475 }
476
477 /* We have to remove fragment header from datagram and to relocate
478 * header in order to calculate ICV correctly. */
479 nhoff = fq->nhoffset;
480 skb_network_header(head)[nhoff] = skb_transport_header(head)[0];
481 memmove(head->head + sizeof(struct frag_hdr), head->head,
482 (head->data - head->head) - sizeof(struct frag_hdr));
483 head->mac_header += sizeof(struct frag_hdr);
484 head->network_header += sizeof(struct frag_hdr);
485
486 skb_shinfo(head)->frag_list = head->next;
487 skb_reset_transport_header(head);
488 skb_push(head, head->data - skb_network_header(head));
489
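	/* Account the remaining fragments into the head skb and combine
	 * their checksums; mixed ip_summed states fall back to CHECKSUM_NONE.
	 */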
490 for (fp=head->next; fp; fp = fp->next) {
491 head->data_len += fp->len;
492 head->len += fp->len;
493 if (head->ip_summed != fp->ip_summed)
494 head->ip_summed = CHECKSUM_NONE;
495 else if (head->ip_summed == CHECKSUM_COMPLETE)
496 head->csum = csum_add(head->csum, fp->csum);
497 head->truesize += fp->truesize;
498 }
499 atomic_sub(head->truesize, &fq->q.net->mem);
500
501 head->next = NULL;
502 head->dev = dev;
503 head->tstamp = fq->q.stamp;
504 ipv6_hdr(head)->payload_len = htons(payload_len);
505 IP6CB(head)->nhoff = nhoff;
506
507 /* Yes, and fold redundant checksum back. 8) */
508 if (head->ip_summed == CHECKSUM_COMPLETE)
509 head->csum = csum_partial(skb_network_header(head),
510 skb_network_header_len(head),
511 head->csum);
512
513 rcu_read_lock();
514 IP6_INC_STATS_BH(net, __in6_dev_get(dev), IPSTATS_MIB_REASMOKS);
515 rcu_read_unlock();
516 fq->q.fragments = NULL;
517 fq->q.fragments_tail = NULL;
518 return 1;
519
520out_oversize:
521 if (net_ratelimit())
522 printk(KERN_DEBUG "ip6_frag_reasm: payload len = %d\n", payload_len);
523 goto out_fail;
524out_oom:
525 if (net_ratelimit())
526 printk(KERN_DEBUG "ip6_frag_reasm: no memory for reassembly\n");
527out_fail:
528 rcu_read_lock();
529 IP6_INC_STATS_BH(net, __in6_dev_get(dev), IPSTATS_MIB_REASMFAILS);
530 rcu_read_unlock();
531 return -1;
532}
533
534static int ipv6_frag_rcv(struct sk_buff *skb)
535{
536 struct frag_hdr *fhdr;
537 struct frag_queue *fq;
538 const struct ipv6hdr *hdr = ipv6_hdr(skb);
539 struct net *net = dev_net(skb_dst(skb)->dev);
540
541 IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_REASMREQDS);
542
543 /* Jumbo payload inhibits frag. header */
544	if (hdr->payload_len == 0)
545 goto fail_hdr;
546
547 if (!pskb_may_pull(skb, (skb_transport_offset(skb) +
548 sizeof(struct frag_hdr))))
549 goto fail_hdr;
550
551 hdr = ipv6_hdr(skb);
552 fhdr = (struct frag_hdr *)skb_transport_header(skb);
553
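	/* 0xFFF9 covers the fragment offset bits plus the M flag; if all of
	 * them are zero the packet carries a fragment header but is not
	 * actually fragmented, so just step over the header.
	 */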
554 if (!(fhdr->frag_off & htons(0xFFF9))) {
555 /* It is not a fragmented frame */
556 skb->transport_header += sizeof(struct frag_hdr);
557 IP6_INC_STATS_BH(net,
558 ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_REASMOKS);
559
560 IP6CB(skb)->nhoff = (u8 *)fhdr - skb_network_header(skb);
561 return 1;
562 }
563
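	/* Above the high threshold, reclaim memory from the least-recently
	 * used incomplete queues before spending more on a new one.
	 */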
564 if (atomic_read(&net->ipv6.frags.mem) > net->ipv6.frags.high_thresh)
565 ip6_evictor(net, ip6_dst_idev(skb_dst(skb)));
566
567 fq = fq_find(net, fhdr->identification, &hdr->saddr, &hdr->daddr);
568 if (fq != NULL) {
569 int ret;
570
571 spin_lock(&fq->q.lock);
572
573 ret = ip6_frag_queue(fq, skb, fhdr, IP6CB(skb)->nhoff);
574
575 spin_unlock(&fq->q.lock);
576 fq_put(fq);
577 return ret;
578 }
579
580 IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_REASMFAILS);
581 kfree_skb(skb);
582 return -1;
583
584fail_hdr:
585 IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_INHDRERRORS);
586 icmpv6_param_prob(skb, ICMPV6_HDR_FIELD, skb_network_header_len(skb));
587 return -1;
588}
589
590static const struct inet6_protocol frag_protocol =
591{
592 .handler = ipv6_frag_rcv,
593 .flags = INET6_PROTO_NOPOLICY,
594};
595
596#ifdef CONFIG_SYSCTL
597static struct ctl_table ip6_frags_ns_ctl_table[] = {
598 {
599 .procname = "ip6frag_high_thresh",
600 .data = &init_net.ipv6.frags.high_thresh,
601 .maxlen = sizeof(int),
602 .mode = 0644,
603 .proc_handler = proc_dointvec
604 },
605 {
606 .procname = "ip6frag_low_thresh",
607 .data = &init_net.ipv6.frags.low_thresh,
608 .maxlen = sizeof(int),
609 .mode = 0644,
610 .proc_handler = proc_dointvec
611 },
612 {
613 .procname = "ip6frag_time",
614 .data = &init_net.ipv6.frags.timeout,
615 .maxlen = sizeof(int),
616 .mode = 0644,
617 .proc_handler = proc_dointvec_jiffies,
618 },
619 { }
620};
621
622static struct ctl_table ip6_frags_ctl_table[] = {
623 {
624 .procname = "ip6frag_secret_interval",
625 .data = &ip6_frags.secret_interval,
626 .maxlen = sizeof(int),
627 .mode = 0644,
628 .proc_handler = proc_dointvec_jiffies,
629 },
630 { }
631};
632
633static int __net_init ip6_frags_ns_sysctl_register(struct net *net)
634{
635 struct ctl_table *table;
636 struct ctl_table_header *hdr;
637
638 table = ip6_frags_ns_ctl_table;
639 if (!net_eq(net, &init_net)) {
640 table = kmemdup(table, sizeof(ip6_frags_ns_ctl_table), GFP_KERNEL);
641 if (table == NULL)
642 goto err_alloc;
643
644 table[0].data = &net->ipv6.frags.high_thresh;
645 table[1].data = &net->ipv6.frags.low_thresh;
646 table[2].data = &net->ipv6.frags.timeout;
647 }
648
649 hdr = register_net_sysctl_table(net, net_ipv6_ctl_path, table);
650 if (hdr == NULL)
651 goto err_reg;
652
653 net->ipv6.sysctl.frags_hdr = hdr;
654 return 0;
655
656err_reg:
657 if (!net_eq(net, &init_net))
658 kfree(table);
659err_alloc:
660 return -ENOMEM;
661}
662
663static void __net_exit ip6_frags_ns_sysctl_unregister(struct net *net)
664{
665 struct ctl_table *table;
666
667 table = net->ipv6.sysctl.frags_hdr->ctl_table_arg;
668 unregister_net_sysctl_table(net->ipv6.sysctl.frags_hdr);
669 if (!net_eq(net, &init_net))
670 kfree(table);
671}
672
673static struct ctl_table_header *ip6_ctl_header;
674
675static int ip6_frags_sysctl_register(void)
676{
677 ip6_ctl_header = register_net_sysctl_rotable(net_ipv6_ctl_path,
678 ip6_frags_ctl_table);
679 return ip6_ctl_header == NULL ? -ENOMEM : 0;
680}
681
682static void ip6_frags_sysctl_unregister(void)
683{
684 unregister_net_sysctl_table(ip6_ctl_header);
685}
686#else
687static inline int ip6_frags_ns_sysctl_register(struct net *net)
688{
689 return 0;
690}
691
692static inline void ip6_frags_ns_sysctl_unregister(struct net *net)
693{
694}
695
696static inline int ip6_frags_sysctl_register(void)
697{
698 return 0;
699}
700
701static inline void ip6_frags_sysctl_unregister(void)
702{
703}
704#endif
705
706static int __net_init ipv6_frags_init_net(struct net *net)
707{
708 net->ipv6.frags.high_thresh = IPV6_FRAG_HIGH_THRESH;
709 net->ipv6.frags.low_thresh = IPV6_FRAG_LOW_THRESH;
710 net->ipv6.frags.timeout = IPV6_FRAG_TIMEOUT;
711
712 inet_frags_init_net(&net->ipv6.frags);
713
714 return ip6_frags_ns_sysctl_register(net);
715}
716
717static void __net_exit ipv6_frags_exit_net(struct net *net)
718{
719 ip6_frags_ns_sysctl_unregister(net);
720 inet_frags_exit_net(&net->ipv6.frags, &ip6_frags);
721}
722
723static struct pernet_operations ip6_frags_ops = {
724 .init = ipv6_frags_init_net,
725 .exit = ipv6_frags_exit_net,
726};
727
728int __init ipv6_frag_init(void)
729{
730 int ret;
731
732 ret = inet6_add_protocol(&frag_protocol, IPPROTO_FRAGMENT);
733 if (ret)
734 goto out;
735
736 ret = ip6_frags_sysctl_register();
737 if (ret)
738 goto err_sysctl;
739
740 ret = register_pernet_subsys(&ip6_frags_ops);
741 if (ret)
742 goto err_pernet;
743
744 ip6_frags.hashfn = ip6_hashfn;
745 ip6_frags.constructor = ip6_frag_init;
746 ip6_frags.destructor = NULL;
747 ip6_frags.skb_free = NULL;
748 ip6_frags.qsize = sizeof(struct frag_queue);
749 ip6_frags.match = ip6_frag_match;
750 ip6_frags.frag_expire = ip6_frag_expire;
751 ip6_frags.secret_interval = 10 * 60 * HZ;
752 inet_frags_init(&ip6_frags);
753out:
754 return ret;
755
756err_pernet:
757 ip6_frags_sysctl_unregister();
758err_sysctl:
759 inet6_del_protocol(&frag_protocol, IPPROTO_FRAGMENT);
760 goto out;
761}
762
763void ipv6_frag_exit(void)
764{
765 inet_frags_fini(&ip6_frags);
766 ip6_frags_sysctl_unregister();
767 unregister_pernet_subsys(&ip6_frags_ops);
768 inet6_del_protocol(&frag_protocol, IPPROTO_FRAGMENT);
769}
1/*
2 * IPv6 fragment reassembly
3 * Linux INET6 implementation
4 *
5 * Authors:
6 * Pedro Roque <roque@di.fc.ul.pt>
7 *
8 * Based on: net/ipv4/ip_fragment.c
9 *
10 * This program is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU General Public License
12 * as published by the Free Software Foundation; either version
13 * 2 of the License, or (at your option) any later version.
14 */
15
16/*
17 * Fixes:
18 * Andi Kleen Make it work with multiple hosts.
19 * More RFC compliance.
20 *
21 * Horst von Brand Add missing #include <linux/string.h>
22 * Alexey Kuznetsov SMP races, threading, cleanup.
23 * Patrick McHardy LRU queue of frag heads for evictor.
24 * Mitsuru KANDA @USAGI Register inet6_protocol{}.
25 * David Stevens and
26 * YOSHIFUJI,H. @USAGI Always remove fragment header to
27 * calculate ICV correctly.
28 */
29
30#define pr_fmt(fmt) "IPv6: " fmt
31
32#include <linux/errno.h>
33#include <linux/types.h>
34#include <linux/string.h>
35#include <linux/socket.h>
36#include <linux/sockios.h>
37#include <linux/jiffies.h>
38#include <linux/net.h>
39#include <linux/list.h>
40#include <linux/netdevice.h>
41#include <linux/in6.h>
42#include <linux/ipv6.h>
43#include <linux/icmpv6.h>
44#include <linux/random.h>
45#include <linux/jhash.h>
46#include <linux/skbuff.h>
47#include <linux/slab.h>
48#include <linux/export.h>
49
50#include <net/sock.h>
51#include <net/snmp.h>
52
53#include <net/ipv6.h>
54#include <net/ip6_route.h>
55#include <net/protocol.h>
56#include <net/transp_v6.h>
57#include <net/rawv6.h>
58#include <net/ndisc.h>
59#include <net/addrconf.h>
60#include <net/inet_frag.h>
61#include <net/inet_ecn.h>
62
63static const char ip6_frag_cache_name[] = "ip6-frags";
64
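/* Record each fragment's ECN codepoint as a bit in a small mask so
 * ip6_frag_reasm() can detect inconsistent markings across fragments
 * via ip_frag_ecn_table and set the ECN bits of the reassembled
 * datagram accordingly.
 */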
65static u8 ip6_frag_ecn(const struct ipv6hdr *ipv6h)
66{
67 return 1 << (ipv6_get_dsfield(ipv6h) & INET_ECN_MASK);
68}
69
70static struct inet_frags ip6_frags;
71
72static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev,
73 struct net_device *dev);
74
75void ip6_frag_init(struct inet_frag_queue *q, const void *a)
76{
77 struct frag_queue *fq = container_of(q, struct frag_queue, q);
78 const struct frag_v6_compare_key *key = a;
79
80 q->key.v6 = *key;
81 fq->ecn = 0;
82}
83EXPORT_SYMBOL(ip6_frag_init);
84
85void ip6_expire_frag_queue(struct net *net, struct frag_queue *fq)
86{
87 struct net_device *dev = NULL;
88 struct sk_buff *head;
89
90 rcu_read_lock();
91 spin_lock(&fq->q.lock);
92
93 if (fq->q.flags & INET_FRAG_COMPLETE)
94 goto out;
95
96 inet_frag_kill(&fq->q);
97
98 dev = dev_get_by_index_rcu(net, fq->iif);
99 if (!dev)
100 goto out;
101
102 __IP6_INC_STATS(net, __in6_dev_get(dev), IPSTATS_MIB_REASMFAILS);
103 __IP6_INC_STATS(net, __in6_dev_get(dev), IPSTATS_MIB_REASMTIMEOUT);
104
105 /* Don't send error if the first segment did not arrive. */
106 head = fq->q.fragments;
107 if (!(fq->q.flags & INET_FRAG_FIRST_IN) || !head)
108 goto out;
109
110	/* But use as source the device on which the LAST ARRIVED
111	 * segment was received. And do not use the fq->dev
112	 * pointer directly, the device might have already disappeared.
113	 */
114 head->dev = dev;
115 skb_get(head);
116 spin_unlock(&fq->q.lock);
117
118 icmpv6_send(head, ICMPV6_TIME_EXCEED, ICMPV6_EXC_FRAGTIME, 0);
119 kfree_skb(head);
120 goto out_rcu_unlock;
121
122out:
123 spin_unlock(&fq->q.lock);
124out_rcu_unlock:
125 rcu_read_unlock();
126 inet_frag_put(&fq->q);
127}
128EXPORT_SYMBOL(ip6_expire_frag_queue);
129
130static void ip6_frag_expire(struct timer_list *t)
131{
132 struct inet_frag_queue *frag = from_timer(frag, t, timer);
133 struct frag_queue *fq;
134 struct net *net;
135
136 fq = container_of(frag, struct frag_queue, q);
137 net = container_of(fq->q.net, struct net, ipv6.frags);
138
139 ip6_expire_frag_queue(net, fq);
140}
141
142static struct frag_queue *
143fq_find(struct net *net, __be32 id, const struct ipv6hdr *hdr, int iif)
144{
145 struct frag_v6_compare_key key = {
146 .id = id,
147 .saddr = hdr->saddr,
148 .daddr = hdr->daddr,
149 .user = IP6_DEFRAG_LOCAL_DELIVER,
150 .iif = iif,
151 };
152 struct inet_frag_queue *q;
153
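	/* Only link-local and multicast destinations are scoped to an
	 * interface; for other addresses drop the ifindex from the lookup
	 * key so fragments arriving on different devices still match.
	 */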
154 if (!(ipv6_addr_type(&hdr->daddr) & (IPV6_ADDR_MULTICAST |
155 IPV6_ADDR_LINKLOCAL)))
156 key.iif = 0;
157
158 q = inet_frag_find(&net->ipv6.frags, &key);
159 if (!q)
160 return NULL;
161
162 return container_of(q, struct frag_queue, q);
163}
164
165static int ip6_frag_queue(struct frag_queue *fq, struct sk_buff *skb,
166 struct frag_hdr *fhdr, int nhoff)
167{
168 struct sk_buff *prev, *next;
169 struct net_device *dev;
170 int offset, end, fragsize;
171 struct net *net = dev_net(skb_dst(skb)->dev);
172 u8 ecn;
173
174 if (fq->q.flags & INET_FRAG_COMPLETE)
175 goto err;
176
177 offset = ntohs(fhdr->frag_off) & ~0x7;
178 end = offset + (ntohs(ipv6_hdr(skb)->payload_len) -
179 ((u8 *)(fhdr + 1) - (u8 *)(ipv6_hdr(skb) + 1)));
180
181 if ((unsigned int)end > IPV6_MAXPLEN) {
182 __IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
183 IPSTATS_MIB_INHDRERRORS);
184 icmpv6_param_prob(skb, ICMPV6_HDR_FIELD,
185 ((u8 *)&fhdr->frag_off -
186 skb_network_header(skb)));
187 return -1;
188 }
189
190 ecn = ip6_frag_ecn(ipv6_hdr(skb));
191
192 if (skb->ip_summed == CHECKSUM_COMPLETE) {
193 const unsigned char *nh = skb_network_header(skb);
194 skb->csum = csum_sub(skb->csum,
195 csum_partial(nh, (u8 *)(fhdr + 1) - nh,
196 0));
197 }
198
199 /* Is this the final fragment? */
200 if (!(fhdr->frag_off & htons(IP6_MF))) {
201 /* If we already have some bits beyond end
202 * or have different end, the segment is corrupted.
203 */
204 if (end < fq->q.len ||
205 ((fq->q.flags & INET_FRAG_LAST_IN) && end != fq->q.len))
206 goto err;
207 fq->q.flags |= INET_FRAG_LAST_IN;
208 fq->q.len = end;
209 } else {
210 /* Check if the fragment is rounded to 8 bytes.
211 * Required by the RFC.
212 */
213 if (end & 0x7) {
214 /* RFC2460 says always send parameter problem in
215 * this case. -DaveM
216 */
217 __IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
218 IPSTATS_MIB_INHDRERRORS);
219 icmpv6_param_prob(skb, ICMPV6_HDR_FIELD,
220 offsetof(struct ipv6hdr, payload_len));
221 return -1;
222 }
223 if (end > fq->q.len) {
224 /* Some bits beyond end -> corruption. */
225 if (fq->q.flags & INET_FRAG_LAST_IN)
226 goto err;
227 fq->q.len = end;
228 }
229 }
230
231 if (end == offset)
232 goto err;
233
234 /* Point into the IP datagram 'data' part. */
235 if (!pskb_pull(skb, (u8 *) (fhdr + 1) - skb->data))
236 goto err;
237
238 if (pskb_trim_rcsum(skb, end - offset))
239 goto err;
240
241 /* Find out which fragments are in front and at the back of us
242 * in the chain of fragments so far. We must know where to put
243 * this fragment, right?
244 */
245 prev = fq->q.fragments_tail;
246 if (!prev || prev->ip_defrag_offset < offset) {
247 next = NULL;
248 goto found;
249 }
250 prev = NULL;
251 for (next = fq->q.fragments; next != NULL; next = next->next) {
252 if (next->ip_defrag_offset >= offset)
253 break; /* bingo! */
254 prev = next;
255 }
256
257found:
258 /* RFC5722, Section 4, amended by Errata ID : 3089
259 * When reassembling an IPv6 datagram, if
260 * one or more of its constituent fragments is determined to be an
261 * overlapping fragment, the entire datagram (and any constituent
262 * fragments) MUST be silently discarded.
263 */
264
265 /* Check for overlap with preceding fragment. */
266 if (prev &&
267 (prev->ip_defrag_offset + prev->len) > offset)
268 goto discard_fq;
269
270 /* Look for overlap with succeeding segment. */
271 if (next && next->ip_defrag_offset < end)
272 goto discard_fq;
273
274 /* Note : skb->ip_defrag_offset and skb->dev share the same location */
275 dev = skb->dev;
276 if (dev)
277 fq->iif = dev->ifindex;
278	/* Make sure the compiler won't do silly aliasing games */
279 barrier();
280 skb->ip_defrag_offset = offset;
281
282 /* Insert this fragment in the chain of fragments. */
283 skb->next = next;
284 if (!next)
285 fq->q.fragments_tail = skb;
286 if (prev)
287 prev->next = skb;
288 else
289 fq->q.fragments = skb;
290
291 fq->q.stamp = skb->tstamp;
292 fq->q.meat += skb->len;
293 fq->ecn |= ecn;
294 add_frag_mem_limit(fq->q.net, skb->truesize);
295
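	/* Track the largest fragment seen (headers included) so that
	 * IP6CB(head)->frag_max_size can be reported after reassembly.
	 */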
296 fragsize = -skb_network_offset(skb) + skb->len;
297 if (fragsize > fq->q.max_size)
298 fq->q.max_size = fragsize;
299
300 /* The first fragment.
301 * nhoffset is obtained from the first fragment, of course.
302 */
303 if (offset == 0) {
304 fq->nhoffset = nhoff;
305 fq->q.flags |= INET_FRAG_FIRST_IN;
306 }
307
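	/* All fragments have arrived: reassemble.  Save and restore this
	 * skb's dst reference around the call, clearing it while the queue
	 * is collapsed into the head skb.
	 */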
308 if (fq->q.flags == (INET_FRAG_FIRST_IN | INET_FRAG_LAST_IN) &&
309 fq->q.meat == fq->q.len) {
310 int res;
311 unsigned long orefdst = skb->_skb_refdst;
312
313 skb->_skb_refdst = 0UL;
314 res = ip6_frag_reasm(fq, prev, dev);
315 skb->_skb_refdst = orefdst;
316 return res;
317 }
318
319 skb_dst_drop(skb);
320 return -1;
321
322discard_fq:
323 inet_frag_kill(&fq->q);
324err:
325 __IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
326 IPSTATS_MIB_REASMFAILS);
327 kfree_skb(skb);
328 return -1;
329}
330
331/*
332 * Check if this packet is complete.
333 * Returns -1 on failure for any reason, and 1 on success, with IP6CB
334 * nhoff holding the offset of the nexthdr field in the reassembled frame.
335 *
336 * It is called with locked fq, and caller must check that
337 * queue is eligible for reassembly i.e. it is not COMPLETE,
338 * the last and the first frames arrived and all the bits are here.
339 */
340static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev,
341 struct net_device *dev)
342{
343 struct net *net = container_of(fq->q.net, struct net, ipv6.frags);
344 struct sk_buff *fp, *head = fq->q.fragments;
345 int payload_len;
346 unsigned int nhoff;
347 int sum_truesize;
348 u8 ecn;
349
350 inet_frag_kill(&fq->q);
351
352 ecn = ip_frag_ecn_table[fq->ecn];
353 if (unlikely(ecn == 0xff))
354 goto out_fail;
355
356 /* Make the one we just received the head. */
357 if (prev) {
358 head = prev->next;
359 fp = skb_clone(head, GFP_ATOMIC);
360
361 if (!fp)
362 goto out_oom;
363
364 fp->next = head->next;
365 if (!fp->next)
366 fq->q.fragments_tail = fp;
367 prev->next = fp;
368
369 skb_morph(head, fq->q.fragments);
370 head->next = fq->q.fragments->next;
371
372 consume_skb(fq->q.fragments);
373 fq->q.fragments = head;
374 }
375
376 WARN_ON(head == NULL);
377 WARN_ON(head->ip_defrag_offset != 0);
378
379 /* Unfragmented part is taken from the first segment. */
380 payload_len = ((head->data - skb_network_header(head)) -
381 sizeof(struct ipv6hdr) + fq->q.len -
382 sizeof(struct frag_hdr));
383 if (payload_len > IPV6_MAXPLEN)
384 goto out_oversize;
385
386 /* Head of list must not be cloned. */
387 if (skb_unclone(head, GFP_ATOMIC))
388 goto out_oom;
389
390 /* If the first fragment is fragmented itself, we split
391 * it to two chunks: the first with data and paged part
392 * and the second, holding only fragments. */
393 if (skb_has_frag_list(head)) {
394 struct sk_buff *clone;
395 int i, plen = 0;
396
397 clone = alloc_skb(0, GFP_ATOMIC);
398 if (!clone)
399 goto out_oom;
400 clone->next = head->next;
401 head->next = clone;
402 skb_shinfo(clone)->frag_list = skb_shinfo(head)->frag_list;
403 skb_frag_list_init(head);
404 for (i = 0; i < skb_shinfo(head)->nr_frags; i++)
405 plen += skb_frag_size(&skb_shinfo(head)->frags[i]);
406 clone->len = clone->data_len = head->data_len - plen;
407 head->data_len -= clone->len;
408 head->len -= clone->len;
409 clone->csum = 0;
410 clone->ip_summed = head->ip_summed;
411 add_frag_mem_limit(fq->q.net, clone->truesize);
412 }
413
414 /* We have to remove fragment header from datagram and to relocate
415 * header in order to calculate ICV correctly. */
416 nhoff = fq->nhoffset;
417 skb_network_header(head)[nhoff] = skb_transport_header(head)[0];
418 memmove(head->head + sizeof(struct frag_hdr), head->head,
419 (head->data - head->head) - sizeof(struct frag_hdr));
420 if (skb_mac_header_was_set(head))
421 head->mac_header += sizeof(struct frag_hdr);
422 head->network_header += sizeof(struct frag_hdr);
423
424 skb_reset_transport_header(head);
425 skb_push(head, head->data - skb_network_header(head));
426
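	/* Fold every remaining fragment into the head: coalesce the data
	 * into the head's pages where possible, otherwise chain the skb on
	 * the head's frag_list, combining checksums along the way.
	 */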
427 sum_truesize = head->truesize;
428 for (fp = head->next; fp;) {
429 bool headstolen;
430 int delta;
431 struct sk_buff *next = fp->next;
432
433 sum_truesize += fp->truesize;
434 if (head->ip_summed != fp->ip_summed)
435 head->ip_summed = CHECKSUM_NONE;
436 else if (head->ip_summed == CHECKSUM_COMPLETE)
437 head->csum = csum_add(head->csum, fp->csum);
438
439 if (skb_try_coalesce(head, fp, &headstolen, &delta)) {
440 kfree_skb_partial(fp, headstolen);
441 } else {
442 if (!skb_shinfo(head)->frag_list)
443 skb_shinfo(head)->frag_list = fp;
444 head->data_len += fp->len;
445 head->len += fp->len;
446 head->truesize += fp->truesize;
447 }
448 fp = next;
449 }
450 sub_frag_mem_limit(fq->q.net, sum_truesize);
451
452 head->next = NULL;
453 head->dev = dev;
454 head->tstamp = fq->q.stamp;
455 ipv6_hdr(head)->payload_len = htons(payload_len);
456 ipv6_change_dsfield(ipv6_hdr(head), 0xff, ecn);
457 IP6CB(head)->nhoff = nhoff;
458 IP6CB(head)->flags |= IP6SKB_FRAGMENTED;
459 IP6CB(head)->frag_max_size = fq->q.max_size;
460
461 /* Yes, and fold redundant checksum back. 8) */
462 skb_postpush_rcsum(head, skb_network_header(head),
463 skb_network_header_len(head));
464
465 rcu_read_lock();
466 __IP6_INC_STATS(net, __in6_dev_get(dev), IPSTATS_MIB_REASMOKS);
467 rcu_read_unlock();
468 fq->q.fragments = NULL;
469 fq->q.fragments_tail = NULL;
470 return 1;
471
472out_oversize:
473 net_dbg_ratelimited("ip6_frag_reasm: payload len = %d\n", payload_len);
474 goto out_fail;
475out_oom:
476 net_dbg_ratelimited("ip6_frag_reasm: no memory for reassembly\n");
477out_fail:
478 rcu_read_lock();
479 __IP6_INC_STATS(net, __in6_dev_get(dev), IPSTATS_MIB_REASMFAILS);
480 rcu_read_unlock();
481 return -1;
482}
483
484static int ipv6_frag_rcv(struct sk_buff *skb)
485{
486 struct frag_hdr *fhdr;
487 struct frag_queue *fq;
488 const struct ipv6hdr *hdr = ipv6_hdr(skb);
489 struct net *net = dev_net(skb_dst(skb)->dev);
490 int iif;
491
492 if (IP6CB(skb)->flags & IP6SKB_FRAGMENTED)
493 goto fail_hdr;
494
495 __IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_REASMREQDS);
496
497 /* Jumbo payload inhibits frag. header */
498 if (hdr->payload_len == 0)
499 goto fail_hdr;
500
501 if (!pskb_may_pull(skb, (skb_transport_offset(skb) +
502 sizeof(struct frag_hdr))))
503 goto fail_hdr;
504
505 hdr = ipv6_hdr(skb);
506 fhdr = (struct frag_hdr *)skb_transport_header(skb);
507
508 if (!(fhdr->frag_off & htons(0xFFF9))) {
509 /* It is not a fragmented frame */
510 skb->transport_header += sizeof(struct frag_hdr);
511 __IP6_INC_STATS(net,
512 ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_REASMOKS);
513
514 IP6CB(skb)->nhoff = (u8 *)fhdr - skb_network_header(skb);
515 IP6CB(skb)->flags |= IP6SKB_FRAGMENTED;
516 return 1;
517 }
518
519 iif = skb->dev ? skb->dev->ifindex : 0;
520 fq = fq_find(net, fhdr->identification, hdr, iif);
521 if (fq) {
522 int ret;
523
524 spin_lock(&fq->q.lock);
525
526 fq->iif = iif;
527 ret = ip6_frag_queue(fq, skb, fhdr, IP6CB(skb)->nhoff);
528
529 spin_unlock(&fq->q.lock);
530 inet_frag_put(&fq->q);
531 return ret;
532 }
533
534 __IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_REASMFAILS);
535 kfree_skb(skb);
536 return -1;
537
538fail_hdr:
539 __IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
540 IPSTATS_MIB_INHDRERRORS);
541 icmpv6_param_prob(skb, ICMPV6_HDR_FIELD, skb_network_header_len(skb));
542 return -1;
543}
544
545static const struct inet6_protocol frag_protocol = {
546 .handler = ipv6_frag_rcv,
547 .flags = INET6_PROTO_NOPOLICY,
548};
549
550#ifdef CONFIG_SYSCTL
551
552static struct ctl_table ip6_frags_ns_ctl_table[] = {
553 {
554 .procname = "ip6frag_high_thresh",
555 .data = &init_net.ipv6.frags.high_thresh,
556 .maxlen = sizeof(unsigned long),
557 .mode = 0644,
558 .proc_handler = proc_doulongvec_minmax,
559 .extra1 = &init_net.ipv6.frags.low_thresh
560 },
561 {
562 .procname = "ip6frag_low_thresh",
563 .data = &init_net.ipv6.frags.low_thresh,
564 .maxlen = sizeof(unsigned long),
565 .mode = 0644,
566 .proc_handler = proc_doulongvec_minmax,
567 .extra2 = &init_net.ipv6.frags.high_thresh
568 },
569 {
570 .procname = "ip6frag_time",
571 .data = &init_net.ipv6.frags.timeout,
572 .maxlen = sizeof(int),
573 .mode = 0644,
574 .proc_handler = proc_dointvec_jiffies,
575 },
576 { }
577};
578
579/* secret interval has been deprecated */
580static int ip6_frags_secret_interval_unused;
581static struct ctl_table ip6_frags_ctl_table[] = {
582 {
583 .procname = "ip6frag_secret_interval",
584 .data = &ip6_frags_secret_interval_unused,
585 .maxlen = sizeof(int),
586 .mode = 0644,
587 .proc_handler = proc_dointvec_jiffies,
588 },
589 { }
590};
591
592static int __net_init ip6_frags_ns_sysctl_register(struct net *net)
593{
594 struct ctl_table *table;
595 struct ctl_table_header *hdr;
596
597 table = ip6_frags_ns_ctl_table;
598 if (!net_eq(net, &init_net)) {
599 table = kmemdup(table, sizeof(ip6_frags_ns_ctl_table), GFP_KERNEL);
600 if (!table)
601 goto err_alloc;
602
603 table[0].data = &net->ipv6.frags.high_thresh;
604 table[0].extra1 = &net->ipv6.frags.low_thresh;
605 table[0].extra2 = &init_net.ipv6.frags.high_thresh;
606 table[1].data = &net->ipv6.frags.low_thresh;
607 table[1].extra2 = &net->ipv6.frags.high_thresh;
608 table[2].data = &net->ipv6.frags.timeout;
609 }
610
611 hdr = register_net_sysctl(net, "net/ipv6", table);
612 if (!hdr)
613 goto err_reg;
614
615 net->ipv6.sysctl.frags_hdr = hdr;
616 return 0;
617
618err_reg:
619 if (!net_eq(net, &init_net))
620 kfree(table);
621err_alloc:
622 return -ENOMEM;
623}
624
625static void __net_exit ip6_frags_ns_sysctl_unregister(struct net *net)
626{
627 struct ctl_table *table;
628
629 table = net->ipv6.sysctl.frags_hdr->ctl_table_arg;
630 unregister_net_sysctl_table(net->ipv6.sysctl.frags_hdr);
631 if (!net_eq(net, &init_net))
632 kfree(table);
633}
634
635static struct ctl_table_header *ip6_ctl_header;
636
637static int ip6_frags_sysctl_register(void)
638{
639 ip6_ctl_header = register_net_sysctl(&init_net, "net/ipv6",
640 ip6_frags_ctl_table);
641 return ip6_ctl_header == NULL ? -ENOMEM : 0;
642}
643
644static void ip6_frags_sysctl_unregister(void)
645{
646 unregister_net_sysctl_table(ip6_ctl_header);
647}
648#else
649static int ip6_frags_ns_sysctl_register(struct net *net)
650{
651 return 0;
652}
653
654static void ip6_frags_ns_sysctl_unregister(struct net *net)
655{
656}
657
658static int ip6_frags_sysctl_register(void)
659{
660 return 0;
661}
662
663static void ip6_frags_sysctl_unregister(void)
664{
665}
666#endif
667
668static int __net_init ipv6_frags_init_net(struct net *net)
669{
670 int res;
671
672 net->ipv6.frags.high_thresh = IPV6_FRAG_HIGH_THRESH;
673 net->ipv6.frags.low_thresh = IPV6_FRAG_LOW_THRESH;
674 net->ipv6.frags.timeout = IPV6_FRAG_TIMEOUT;
675 net->ipv6.frags.f = &ip6_frags;
676
677 res = inet_frags_init_net(&net->ipv6.frags);
678 if (res < 0)
679 return res;
680
681 res = ip6_frags_ns_sysctl_register(net);
682 if (res < 0)
683 inet_frags_exit_net(&net->ipv6.frags);
684 return res;
685}
686
687static void __net_exit ipv6_frags_exit_net(struct net *net)
688{
689 ip6_frags_ns_sysctl_unregister(net);
690 inet_frags_exit_net(&net->ipv6.frags);
691}
692
693static struct pernet_operations ip6_frags_ops = {
694 .init = ipv6_frags_init_net,
695 .exit = ipv6_frags_exit_net,
696};
697
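/* Fragment queues are kept in a per-namespace rhashtable, keyed by
 * struct frag_v6_compare_key (id, addresses, user, ifindex) and
 * hashed with jhash2 over the whole key.
 */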
698static u32 ip6_key_hashfn(const void *data, u32 len, u32 seed)
699{
700 return jhash2(data,
701 sizeof(struct frag_v6_compare_key) / sizeof(u32), seed);
702}
703
704static u32 ip6_obj_hashfn(const void *data, u32 len, u32 seed)
705{
706 const struct inet_frag_queue *fq = data;
707
708 return jhash2((const u32 *)&fq->key.v6,
709 sizeof(struct frag_v6_compare_key) / sizeof(u32), seed);
710}
711
712static int ip6_obj_cmpfn(struct rhashtable_compare_arg *arg, const void *ptr)
713{
714 const struct frag_v6_compare_key *key = arg->key;
715 const struct inet_frag_queue *fq = ptr;
716
717 return !!memcmp(&fq->key, key, sizeof(*key));
718}
719
720const struct rhashtable_params ip6_rhash_params = {
721 .head_offset = offsetof(struct inet_frag_queue, node),
722 .hashfn = ip6_key_hashfn,
723 .obj_hashfn = ip6_obj_hashfn,
724 .obj_cmpfn = ip6_obj_cmpfn,
725 .automatic_shrinking = true,
726};
727EXPORT_SYMBOL(ip6_rhash_params);
728
729int __init ipv6_frag_init(void)
730{
731 int ret;
732
733 ip6_frags.constructor = ip6_frag_init;
734 ip6_frags.destructor = NULL;
735 ip6_frags.qsize = sizeof(struct frag_queue);
736 ip6_frags.frag_expire = ip6_frag_expire;
737 ip6_frags.frags_cache_name = ip6_frag_cache_name;
738 ip6_frags.rhash_params = ip6_rhash_params;
739 ret = inet_frags_init(&ip6_frags);
740 if (ret)
741 goto out;
742
743 ret = inet6_add_protocol(&frag_protocol, IPPROTO_FRAGMENT);
744 if (ret)
745 goto err_protocol;
746
747 ret = ip6_frags_sysctl_register();
748 if (ret)
749 goto err_sysctl;
750
751 ret = register_pernet_subsys(&ip6_frags_ops);
752 if (ret)
753 goto err_pernet;
754
755out:
756 return ret;
757
758err_pernet:
759 ip6_frags_sysctl_unregister();
760err_sysctl:
761 inet6_del_protocol(&frag_protocol, IPPROTO_FRAGMENT);
762err_protocol:
763 inet_frags_fini(&ip6_frags);
764 goto out;
765}
766
767void ipv6_frag_exit(void)
768{
769 inet_frags_fini(&ip6_frags);
770 ip6_frags_sysctl_unregister();
771 unregister_pernet_subsys(&ip6_frags_ops);
772 inet6_del_protocol(&frag_protocol, IPPROTO_FRAGMENT);
773}