Loading...
1/*
2 * TCP over IPv6
3 * Linux INET6 implementation
4 *
5 * Authors:
6 * Pedro Roque <roque@di.fc.ul.pt>
7 *
8 * Based on:
9 * linux/net/ipv4/tcp.c
10 * linux/net/ipv4/tcp_input.c
11 * linux/net/ipv4/tcp_output.c
12 *
13 * Fixes:
14 * Hideaki YOSHIFUJI : sin6_scope_id support
15 * YOSHIFUJI Hideaki @USAGI and: Support IPV6_V6ONLY socket option, which
16 * Alexey Kuznetsov allow both IPv4 and IPv6 sockets to bind
17 * a single port at the same time.
18 * YOSHIFUJI Hideaki @USAGI: convert /proc/net/tcp6 to seq_file.
19 *
20 * This program is free software; you can redistribute it and/or
21 * modify it under the terms of the GNU General Public License
22 * as published by the Free Software Foundation; either version
23 * 2 of the License, or (at your option) any later version.
24 */
25
26#include <linux/bottom_half.h>
27#include <linux/module.h>
28#include <linux/errno.h>
29#include <linux/types.h>
30#include <linux/socket.h>
31#include <linux/sockios.h>
32#include <linux/net.h>
33#include <linux/jiffies.h>
34#include <linux/in.h>
35#include <linux/in6.h>
36#include <linux/netdevice.h>
37#include <linux/init.h>
38#include <linux/jhash.h>
39#include <linux/ipsec.h>
40#include <linux/times.h>
41#include <linux/slab.h>
42
43#include <linux/ipv6.h>
44#include <linux/icmpv6.h>
45#include <linux/random.h>
46
47#include <net/tcp.h>
48#include <net/ndisc.h>
49#include <net/inet6_hashtables.h>
50#include <net/inet6_connection_sock.h>
51#include <net/ipv6.h>
52#include <net/transp_v6.h>
53#include <net/addrconf.h>
54#include <net/ip6_route.h>
55#include <net/ip6_checksum.h>
56#include <net/inet_ecn.h>
57#include <net/protocol.h>
58#include <net/xfrm.h>
59#include <net/snmp.h>
60#include <net/dsfield.h>
61#include <net/timewait_sock.h>
62#include <net/netdma.h>
63#include <net/inet_common.h>
64#include <net/secure_seq.h>
65
66#include <asm/uaccess.h>
67
68#include <linux/proc_fs.h>
69#include <linux/seq_file.h>
70
71#include <linux/crypto.h>
72#include <linux/scatterlist.h>
73
74static void tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb);
75static void tcp_v6_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
76 struct request_sock *req);
77
78static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb);
79static void __tcp_v6_send_check(struct sk_buff *skb,
80 const struct in6_addr *saddr,
81 const struct in6_addr *daddr);
82
83static const struct inet_connection_sock_af_ops ipv6_mapped;
84static const struct inet_connection_sock_af_ops ipv6_specific;
85#ifdef CONFIG_TCP_MD5SIG
86static const struct tcp_sock_af_ops tcp_sock_ipv6_specific;
87static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific;
88#else
89static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk,
90 const struct in6_addr *addr)
91{
92 return NULL;
93}
94#endif
95
96static void tcp_v6_hash(struct sock *sk)
97{
98 if (sk->sk_state != TCP_CLOSE) {
99 if (inet_csk(sk)->icsk_af_ops == &ipv6_mapped) {
100 tcp_prot.hash(sk);
101 return;
102 }
103 local_bh_disable();
104 __inet6_hash(sk, NULL);
105 local_bh_enable();
106 }
107}
108
109static __inline__ __sum16 tcp_v6_check(int len,
110 const struct in6_addr *saddr,
111 const struct in6_addr *daddr,
112 __wsum base)
113{
114 return csum_ipv6_magic(saddr, daddr, len, IPPROTO_TCP, base);
115}
116
117static __u32 tcp_v6_init_sequence(struct sk_buff *skb)
118{
119 return secure_tcpv6_sequence_number(ipv6_hdr(skb)->daddr.s6_addr32,
120 ipv6_hdr(skb)->saddr.s6_addr32,
121 tcp_hdr(skb)->dest,
122 tcp_hdr(skb)->source);
123}
124
125static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
126 int addr_len)
127{
128 struct sockaddr_in6 *usin = (struct sockaddr_in6 *) uaddr;
129 struct inet_sock *inet = inet_sk(sk);
130 struct inet_connection_sock *icsk = inet_csk(sk);
131 struct ipv6_pinfo *np = inet6_sk(sk);
132 struct tcp_sock *tp = tcp_sk(sk);
133 struct in6_addr *saddr = NULL, *final_p, final;
134 struct rt6_info *rt;
135 struct flowi6 fl6;
136 struct dst_entry *dst;
137 int addr_type;
138 int err;
139
140 if (addr_len < SIN6_LEN_RFC2133)
141 return -EINVAL;
142
143 if (usin->sin6_family != AF_INET6)
144 return -EAFNOSUPPORT;
145
146 memset(&fl6, 0, sizeof(fl6));
147
148 if (np->sndflow) {
149 fl6.flowlabel = usin->sin6_flowinfo&IPV6_FLOWINFO_MASK;
150 IP6_ECN_flow_init(fl6.flowlabel);
151 if (fl6.flowlabel&IPV6_FLOWLABEL_MASK) {
152 struct ip6_flowlabel *flowlabel;
153 flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
154 if (flowlabel == NULL)
155 return -EINVAL;
156 ipv6_addr_copy(&usin->sin6_addr, &flowlabel->dst);
157 fl6_sock_release(flowlabel);
158 }
159 }
160
161 /*
162 * connect() to INADDR_ANY means loopback (BSD'ism).
163 */
164
165 if(ipv6_addr_any(&usin->sin6_addr))
166 usin->sin6_addr.s6_addr[15] = 0x1;
167
168 addr_type = ipv6_addr_type(&usin->sin6_addr);
169
170 if(addr_type & IPV6_ADDR_MULTICAST)
171 return -ENETUNREACH;
172
173 if (addr_type&IPV6_ADDR_LINKLOCAL) {
174 if (addr_len >= sizeof(struct sockaddr_in6) &&
175 usin->sin6_scope_id) {
176 /* If interface is set while binding, indices
177 * must coincide.
178 */
179 if (sk->sk_bound_dev_if &&
180 sk->sk_bound_dev_if != usin->sin6_scope_id)
181 return -EINVAL;
182
183 sk->sk_bound_dev_if = usin->sin6_scope_id;
184 }
185
186 /* Connect to link-local address requires an interface */
187 if (!sk->sk_bound_dev_if)
188 return -EINVAL;
189 }
190
191 if (tp->rx_opt.ts_recent_stamp &&
192 !ipv6_addr_equal(&np->daddr, &usin->sin6_addr)) {
193 tp->rx_opt.ts_recent = 0;
194 tp->rx_opt.ts_recent_stamp = 0;
195 tp->write_seq = 0;
196 }
197
198 ipv6_addr_copy(&np->daddr, &usin->sin6_addr);
199 np->flow_label = fl6.flowlabel;
200
201 /*
202 * TCP over IPv4
203 */
204
205 if (addr_type == IPV6_ADDR_MAPPED) {
206 u32 exthdrlen = icsk->icsk_ext_hdr_len;
207 struct sockaddr_in sin;
208
209 SOCK_DEBUG(sk, "connect: ipv4 mapped\n");
210
211 if (__ipv6_only_sock(sk))
212 return -ENETUNREACH;
213
214 sin.sin_family = AF_INET;
215 sin.sin_port = usin->sin6_port;
216 sin.sin_addr.s_addr = usin->sin6_addr.s6_addr32[3];
217
218 icsk->icsk_af_ops = &ipv6_mapped;
219 sk->sk_backlog_rcv = tcp_v4_do_rcv;
220#ifdef CONFIG_TCP_MD5SIG
221 tp->af_specific = &tcp_sock_ipv6_mapped_specific;
222#endif
223
224 err = tcp_v4_connect(sk, (struct sockaddr *)&sin, sizeof(sin));
225
226 if (err) {
227 icsk->icsk_ext_hdr_len = exthdrlen;
228 icsk->icsk_af_ops = &ipv6_specific;
229 sk->sk_backlog_rcv = tcp_v6_do_rcv;
230#ifdef CONFIG_TCP_MD5SIG
231 tp->af_specific = &tcp_sock_ipv6_specific;
232#endif
233 goto failure;
234 } else {
235 ipv6_addr_set_v4mapped(inet->inet_saddr, &np->saddr);
236 ipv6_addr_set_v4mapped(inet->inet_rcv_saddr,
237 &np->rcv_saddr);
238 }
239
240 return err;
241 }
242
243 if (!ipv6_addr_any(&np->rcv_saddr))
244 saddr = &np->rcv_saddr;
245
246 fl6.flowi6_proto = IPPROTO_TCP;
247 ipv6_addr_copy(&fl6.daddr, &np->daddr);
248 ipv6_addr_copy(&fl6.saddr,
249 (saddr ? saddr : &np->saddr));
250 fl6.flowi6_oif = sk->sk_bound_dev_if;
251 fl6.flowi6_mark = sk->sk_mark;
252 fl6.fl6_dport = usin->sin6_port;
253 fl6.fl6_sport = inet->inet_sport;
254
255 final_p = fl6_update_dst(&fl6, np->opt, &final);
256
257 security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));
258
259 dst = ip6_dst_lookup_flow(sk, &fl6, final_p, true);
260 if (IS_ERR(dst)) {
261 err = PTR_ERR(dst);
262 goto failure;
263 }
264
265 if (saddr == NULL) {
266 saddr = &fl6.saddr;
267 ipv6_addr_copy(&np->rcv_saddr, saddr);
268 }
269
270 /* set the source address */
271 ipv6_addr_copy(&np->saddr, saddr);
272 inet->inet_rcv_saddr = LOOPBACK4_IPV6;
273
274 sk->sk_gso_type = SKB_GSO_TCPV6;
275 __ip6_dst_store(sk, dst, NULL, NULL);
276
277 rt = (struct rt6_info *) dst;
278 if (tcp_death_row.sysctl_tw_recycle &&
279 !tp->rx_opt.ts_recent_stamp &&
280 ipv6_addr_equal(&rt->rt6i_dst.addr, &np->daddr)) {
281 struct inet_peer *peer = rt6_get_peer(rt);
282 /*
283 * VJ's idea. We save last timestamp seen from
284 * the destination in peer table, when entering state
285 * TIME-WAIT * and initialize rx_opt.ts_recent from it,
286 * when trying new connection.
287 */
288 if (peer) {
289 inet_peer_refcheck(peer);
290 if ((u32)get_seconds() - peer->tcp_ts_stamp <= TCP_PAWS_MSL) {
291 tp->rx_opt.ts_recent_stamp = peer->tcp_ts_stamp;
292 tp->rx_opt.ts_recent = peer->tcp_ts;
293 }
294 }
295 }
296
297 icsk->icsk_ext_hdr_len = 0;
298 if (np->opt)
299 icsk->icsk_ext_hdr_len = (np->opt->opt_flen +
300 np->opt->opt_nflen);
301
302 tp->rx_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);
303
304 inet->inet_dport = usin->sin6_port;
305
306 tcp_set_state(sk, TCP_SYN_SENT);
307 err = inet6_hash_connect(&tcp_death_row, sk);
308 if (err)
309 goto late_failure;
310
311 if (!tp->write_seq)
312 tp->write_seq = secure_tcpv6_sequence_number(np->saddr.s6_addr32,
313 np->daddr.s6_addr32,
314 inet->inet_sport,
315 inet->inet_dport);
316
317 err = tcp_connect(sk);
318 if (err)
319 goto late_failure;
320
321 return 0;
322
323late_failure:
324 tcp_set_state(sk, TCP_CLOSE);
325 __sk_dst_reset(sk);
326failure:
327 inet->inet_dport = 0;
328 sk->sk_route_caps = 0;
329 return err;
330}
331
332static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
333 u8 type, u8 code, int offset, __be32 info)
334{
335 const struct ipv6hdr *hdr = (const struct ipv6hdr*)skb->data;
336 const struct tcphdr *th = (struct tcphdr *)(skb->data+offset);
337 struct ipv6_pinfo *np;
338 struct sock *sk;
339 int err;
340 struct tcp_sock *tp;
341 __u32 seq;
342 struct net *net = dev_net(skb->dev);
343
344 sk = inet6_lookup(net, &tcp_hashinfo, &hdr->daddr,
345 th->dest, &hdr->saddr, th->source, skb->dev->ifindex);
346
347 if (sk == NULL) {
348 ICMP6_INC_STATS_BH(net, __in6_dev_get(skb->dev),
349 ICMP6_MIB_INERRORS);
350 return;
351 }
352
353 if (sk->sk_state == TCP_TIME_WAIT) {
354 inet_twsk_put(inet_twsk(sk));
355 return;
356 }
357
358 bh_lock_sock(sk);
359 if (sock_owned_by_user(sk))
360 NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS);
361
362 if (sk->sk_state == TCP_CLOSE)
363 goto out;
364
365 if (ipv6_hdr(skb)->hop_limit < inet6_sk(sk)->min_hopcount) {
366 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
367 goto out;
368 }
369
370 tp = tcp_sk(sk);
371 seq = ntohl(th->seq);
372 if (sk->sk_state != TCP_LISTEN &&
373 !between(seq, tp->snd_una, tp->snd_nxt)) {
374 NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
375 goto out;
376 }
377
378 np = inet6_sk(sk);
379
380 if (type == ICMPV6_PKT_TOOBIG) {
381 struct dst_entry *dst;
382
383 if (sock_owned_by_user(sk))
384 goto out;
385 if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))
386 goto out;
387
388 /* icmp should have updated the destination cache entry */
389 dst = __sk_dst_check(sk, np->dst_cookie);
390
391 if (dst == NULL) {
392 struct inet_sock *inet = inet_sk(sk);
393 struct flowi6 fl6;
394
395 /* BUGGG_FUTURE: Again, it is not clear how
396 to handle rthdr case. Ignore this complexity
397 for now.
398 */
399 memset(&fl6, 0, sizeof(fl6));
400 fl6.flowi6_proto = IPPROTO_TCP;
401 ipv6_addr_copy(&fl6.daddr, &np->daddr);
402 ipv6_addr_copy(&fl6.saddr, &np->saddr);
403 fl6.flowi6_oif = sk->sk_bound_dev_if;
404 fl6.flowi6_mark = sk->sk_mark;
405 fl6.fl6_dport = inet->inet_dport;
406 fl6.fl6_sport = inet->inet_sport;
407 security_skb_classify_flow(skb, flowi6_to_flowi(&fl6));
408
409 dst = ip6_dst_lookup_flow(sk, &fl6, NULL, false);
410 if (IS_ERR(dst)) {
411 sk->sk_err_soft = -PTR_ERR(dst);
412 goto out;
413 }
414
415 } else
416 dst_hold(dst);
417
418 if (inet_csk(sk)->icsk_pmtu_cookie > dst_mtu(dst)) {
419 tcp_sync_mss(sk, dst_mtu(dst));
420 tcp_simple_retransmit(sk);
421 } /* else let the usual retransmit timer handle it */
422 dst_release(dst);
423 goto out;
424 }
425
426 icmpv6_err_convert(type, code, &err);
427
428 /* Might be for an request_sock */
429 switch (sk->sk_state) {
430 struct request_sock *req, **prev;
431 case TCP_LISTEN:
432 if (sock_owned_by_user(sk))
433 goto out;
434
435 req = inet6_csk_search_req(sk, &prev, th->dest, &hdr->daddr,
436 &hdr->saddr, inet6_iif(skb));
437 if (!req)
438 goto out;
439
440 /* ICMPs are not backlogged, hence we cannot get
441 * an established socket here.
442 */
443 WARN_ON(req->sk != NULL);
444
445 if (seq != tcp_rsk(req)->snt_isn) {
446 NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
447 goto out;
448 }
449
450 inet_csk_reqsk_queue_drop(sk, req, prev);
451 goto out;
452
453 case TCP_SYN_SENT:
454 case TCP_SYN_RECV: /* Cannot happen.
455 It can, it SYNs are crossed. --ANK */
456 if (!sock_owned_by_user(sk)) {
457 sk->sk_err = err;
458 sk->sk_error_report(sk); /* Wake people up to see the error (see connect in sock.c) */
459
460 tcp_done(sk);
461 } else
462 sk->sk_err_soft = err;
463 goto out;
464 }
465
466 if (!sock_owned_by_user(sk) && np->recverr) {
467 sk->sk_err = err;
468 sk->sk_error_report(sk);
469 } else
470 sk->sk_err_soft = err;
471
472out:
473 bh_unlock_sock(sk);
474 sock_put(sk);
475}
476
477
478static int tcp_v6_send_synack(struct sock *sk, struct request_sock *req,
479 struct request_values *rvp)
480{
481 struct inet6_request_sock *treq = inet6_rsk(req);
482 struct ipv6_pinfo *np = inet6_sk(sk);
483 struct sk_buff * skb;
484 struct ipv6_txoptions *opt = NULL;
485 struct in6_addr * final_p, final;
486 struct flowi6 fl6;
487 struct dst_entry *dst;
488 int err;
489
490 memset(&fl6, 0, sizeof(fl6));
491 fl6.flowi6_proto = IPPROTO_TCP;
492 ipv6_addr_copy(&fl6.daddr, &treq->rmt_addr);
493 ipv6_addr_copy(&fl6.saddr, &treq->loc_addr);
494 fl6.flowlabel = 0;
495 fl6.flowi6_oif = treq->iif;
496 fl6.flowi6_mark = sk->sk_mark;
497 fl6.fl6_dport = inet_rsk(req)->rmt_port;
498 fl6.fl6_sport = inet_rsk(req)->loc_port;
499 security_req_classify_flow(req, flowi6_to_flowi(&fl6));
500
501 opt = np->opt;
502 final_p = fl6_update_dst(&fl6, opt, &final);
503
504 dst = ip6_dst_lookup_flow(sk, &fl6, final_p, false);
505 if (IS_ERR(dst)) {
506 err = PTR_ERR(dst);
507 dst = NULL;
508 goto done;
509 }
510 skb = tcp_make_synack(sk, dst, req, rvp);
511 err = -ENOMEM;
512 if (skb) {
513 __tcp_v6_send_check(skb, &treq->loc_addr, &treq->rmt_addr);
514
515 ipv6_addr_copy(&fl6.daddr, &treq->rmt_addr);
516 err = ip6_xmit(sk, skb, &fl6, opt);
517 err = net_xmit_eval(err);
518 }
519
520done:
521 if (opt && opt != np->opt)
522 sock_kfree_s(sk, opt, opt->tot_len);
523 dst_release(dst);
524 return err;
525}
526
527static int tcp_v6_rtx_synack(struct sock *sk, struct request_sock *req,
528 struct request_values *rvp)
529{
530 TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_RETRANSSEGS);
531 return tcp_v6_send_synack(sk, req, rvp);
532}
533
534static void tcp_v6_reqsk_destructor(struct request_sock *req)
535{
536 kfree_skb(inet6_rsk(req)->pktopts);
537}
538
539#ifdef CONFIG_TCP_MD5SIG
540static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk,
541 const struct in6_addr *addr)
542{
543 struct tcp_sock *tp = tcp_sk(sk);
544 int i;
545
546 BUG_ON(tp == NULL);
547
548 if (!tp->md5sig_info || !tp->md5sig_info->entries6)
549 return NULL;
550
551 for (i = 0; i < tp->md5sig_info->entries6; i++) {
552 if (ipv6_addr_equal(&tp->md5sig_info->keys6[i].addr, addr))
553 return &tp->md5sig_info->keys6[i].base;
554 }
555 return NULL;
556}
557
558static struct tcp_md5sig_key *tcp_v6_md5_lookup(struct sock *sk,
559 struct sock *addr_sk)
560{
561 return tcp_v6_md5_do_lookup(sk, &inet6_sk(addr_sk)->daddr);
562}
563
564static struct tcp_md5sig_key *tcp_v6_reqsk_md5_lookup(struct sock *sk,
565 struct request_sock *req)
566{
567 return tcp_v6_md5_do_lookup(sk, &inet6_rsk(req)->rmt_addr);
568}
569
570static int tcp_v6_md5_do_add(struct sock *sk, const struct in6_addr *peer,
571 char *newkey, u8 newkeylen)
572{
573 /* Add key to the list */
574 struct tcp_md5sig_key *key;
575 struct tcp_sock *tp = tcp_sk(sk);
576 struct tcp6_md5sig_key *keys;
577
578 key = tcp_v6_md5_do_lookup(sk, peer);
579 if (key) {
580 /* modify existing entry - just update that one */
581 kfree(key->key);
582 key->key = newkey;
583 key->keylen = newkeylen;
584 } else {
585 /* reallocate new list if current one is full. */
586 if (!tp->md5sig_info) {
587 tp->md5sig_info = kzalloc(sizeof(*tp->md5sig_info), GFP_ATOMIC);
588 if (!tp->md5sig_info) {
589 kfree(newkey);
590 return -ENOMEM;
591 }
592 sk_nocaps_add(sk, NETIF_F_GSO_MASK);
593 }
594 if (tp->md5sig_info->entries6 == 0 &&
595 tcp_alloc_md5sig_pool(sk) == NULL) {
596 kfree(newkey);
597 return -ENOMEM;
598 }
599 if (tp->md5sig_info->alloced6 == tp->md5sig_info->entries6) {
600 keys = kmalloc((sizeof (tp->md5sig_info->keys6[0]) *
601 (tp->md5sig_info->entries6 + 1)), GFP_ATOMIC);
602
603 if (!keys) {
604 kfree(newkey);
605 if (tp->md5sig_info->entries6 == 0)
606 tcp_free_md5sig_pool();
607 return -ENOMEM;
608 }
609
610 if (tp->md5sig_info->entries6)
611 memmove(keys, tp->md5sig_info->keys6,
612 (sizeof (tp->md5sig_info->keys6[0]) *
613 tp->md5sig_info->entries6));
614
615 kfree(tp->md5sig_info->keys6);
616 tp->md5sig_info->keys6 = keys;
617 tp->md5sig_info->alloced6++;
618 }
619
620 ipv6_addr_copy(&tp->md5sig_info->keys6[tp->md5sig_info->entries6].addr,
621 peer);
622 tp->md5sig_info->keys6[tp->md5sig_info->entries6].base.key = newkey;
623 tp->md5sig_info->keys6[tp->md5sig_info->entries6].base.keylen = newkeylen;
624
625 tp->md5sig_info->entries6++;
626 }
627 return 0;
628}
629
630static int tcp_v6_md5_add_func(struct sock *sk, struct sock *addr_sk,
631 u8 *newkey, __u8 newkeylen)
632{
633 return tcp_v6_md5_do_add(sk, &inet6_sk(addr_sk)->daddr,
634 newkey, newkeylen);
635}
636
637static int tcp_v6_md5_do_del(struct sock *sk, const struct in6_addr *peer)
638{
639 struct tcp_sock *tp = tcp_sk(sk);
640 int i;
641
642 for (i = 0; i < tp->md5sig_info->entries6; i++) {
643 if (ipv6_addr_equal(&tp->md5sig_info->keys6[i].addr, peer)) {
644 /* Free the key */
645 kfree(tp->md5sig_info->keys6[i].base.key);
646 tp->md5sig_info->entries6--;
647
648 if (tp->md5sig_info->entries6 == 0) {
649 kfree(tp->md5sig_info->keys6);
650 tp->md5sig_info->keys6 = NULL;
651 tp->md5sig_info->alloced6 = 0;
652 tcp_free_md5sig_pool();
653 } else {
654 /* shrink the database */
655 if (tp->md5sig_info->entries6 != i)
656 memmove(&tp->md5sig_info->keys6[i],
657 &tp->md5sig_info->keys6[i+1],
658 (tp->md5sig_info->entries6 - i)
659 * sizeof (tp->md5sig_info->keys6[0]));
660 }
661 return 0;
662 }
663 }
664 return -ENOENT;
665}
666
667static void tcp_v6_clear_md5_list (struct sock *sk)
668{
669 struct tcp_sock *tp = tcp_sk(sk);
670 int i;
671
672 if (tp->md5sig_info->entries6) {
673 for (i = 0; i < tp->md5sig_info->entries6; i++)
674 kfree(tp->md5sig_info->keys6[i].base.key);
675 tp->md5sig_info->entries6 = 0;
676 tcp_free_md5sig_pool();
677 }
678
679 kfree(tp->md5sig_info->keys6);
680 tp->md5sig_info->keys6 = NULL;
681 tp->md5sig_info->alloced6 = 0;
682
683 if (tp->md5sig_info->entries4) {
684 for (i = 0; i < tp->md5sig_info->entries4; i++)
685 kfree(tp->md5sig_info->keys4[i].base.key);
686 tp->md5sig_info->entries4 = 0;
687 tcp_free_md5sig_pool();
688 }
689
690 kfree(tp->md5sig_info->keys4);
691 tp->md5sig_info->keys4 = NULL;
692 tp->md5sig_info->alloced4 = 0;
693}
694
695static int tcp_v6_parse_md5_keys (struct sock *sk, char __user *optval,
696 int optlen)
697{
698 struct tcp_md5sig cmd;
699 struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&cmd.tcpm_addr;
700 u8 *newkey;
701
702 if (optlen < sizeof(cmd))
703 return -EINVAL;
704
705 if (copy_from_user(&cmd, optval, sizeof(cmd)))
706 return -EFAULT;
707
708 if (sin6->sin6_family != AF_INET6)
709 return -EINVAL;
710
711 if (!cmd.tcpm_keylen) {
712 if (!tcp_sk(sk)->md5sig_info)
713 return -ENOENT;
714 if (ipv6_addr_v4mapped(&sin6->sin6_addr))
715 return tcp_v4_md5_do_del(sk, sin6->sin6_addr.s6_addr32[3]);
716 return tcp_v6_md5_do_del(sk, &sin6->sin6_addr);
717 }
718
719 if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
720 return -EINVAL;
721
722 if (!tcp_sk(sk)->md5sig_info) {
723 struct tcp_sock *tp = tcp_sk(sk);
724 struct tcp_md5sig_info *p;
725
726 p = kzalloc(sizeof(struct tcp_md5sig_info), GFP_KERNEL);
727 if (!p)
728 return -ENOMEM;
729
730 tp->md5sig_info = p;
731 sk_nocaps_add(sk, NETIF_F_GSO_MASK);
732 }
733
734 newkey = kmemdup(cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL);
735 if (!newkey)
736 return -ENOMEM;
737 if (ipv6_addr_v4mapped(&sin6->sin6_addr)) {
738 return tcp_v4_md5_do_add(sk, sin6->sin6_addr.s6_addr32[3],
739 newkey, cmd.tcpm_keylen);
740 }
741 return tcp_v6_md5_do_add(sk, &sin6->sin6_addr, newkey, cmd.tcpm_keylen);
742}
743
744static int tcp_v6_md5_hash_pseudoheader(struct tcp_md5sig_pool *hp,
745 const struct in6_addr *daddr,
746 const struct in6_addr *saddr, int nbytes)
747{
748 struct tcp6_pseudohdr *bp;
749 struct scatterlist sg;
750
751 bp = &hp->md5_blk.ip6;
752 /* 1. TCP pseudo-header (RFC2460) */
753 ipv6_addr_copy(&bp->saddr, saddr);
754 ipv6_addr_copy(&bp->daddr, daddr);
755 bp->protocol = cpu_to_be32(IPPROTO_TCP);
756 bp->len = cpu_to_be32(nbytes);
757
758 sg_init_one(&sg, bp, sizeof(*bp));
759 return crypto_hash_update(&hp->md5_desc, &sg, sizeof(*bp));
760}
761
762static int tcp_v6_md5_hash_hdr(char *md5_hash, struct tcp_md5sig_key *key,
763 const struct in6_addr *daddr, struct in6_addr *saddr,
764 struct tcphdr *th)
765{
766 struct tcp_md5sig_pool *hp;
767 struct hash_desc *desc;
768
769 hp = tcp_get_md5sig_pool();
770 if (!hp)
771 goto clear_hash_noput;
772 desc = &hp->md5_desc;
773
774 if (crypto_hash_init(desc))
775 goto clear_hash;
776 if (tcp_v6_md5_hash_pseudoheader(hp, daddr, saddr, th->doff << 2))
777 goto clear_hash;
778 if (tcp_md5_hash_header(hp, th))
779 goto clear_hash;
780 if (tcp_md5_hash_key(hp, key))
781 goto clear_hash;
782 if (crypto_hash_final(desc, md5_hash))
783 goto clear_hash;
784
785 tcp_put_md5sig_pool();
786 return 0;
787
788clear_hash:
789 tcp_put_md5sig_pool();
790clear_hash_noput:
791 memset(md5_hash, 0, 16);
792 return 1;
793}
794
795static int tcp_v6_md5_hash_skb(char *md5_hash, struct tcp_md5sig_key *key,
796 struct sock *sk, struct request_sock *req,
797 struct sk_buff *skb)
798{
799 const struct in6_addr *saddr, *daddr;
800 struct tcp_md5sig_pool *hp;
801 struct hash_desc *desc;
802 struct tcphdr *th = tcp_hdr(skb);
803
804 if (sk) {
805 saddr = &inet6_sk(sk)->saddr;
806 daddr = &inet6_sk(sk)->daddr;
807 } else if (req) {
808 saddr = &inet6_rsk(req)->loc_addr;
809 daddr = &inet6_rsk(req)->rmt_addr;
810 } else {
811 const struct ipv6hdr *ip6h = ipv6_hdr(skb);
812 saddr = &ip6h->saddr;
813 daddr = &ip6h->daddr;
814 }
815
816 hp = tcp_get_md5sig_pool();
817 if (!hp)
818 goto clear_hash_noput;
819 desc = &hp->md5_desc;
820
821 if (crypto_hash_init(desc))
822 goto clear_hash;
823
824 if (tcp_v6_md5_hash_pseudoheader(hp, daddr, saddr, skb->len))
825 goto clear_hash;
826 if (tcp_md5_hash_header(hp, th))
827 goto clear_hash;
828 if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2))
829 goto clear_hash;
830 if (tcp_md5_hash_key(hp, key))
831 goto clear_hash;
832 if (crypto_hash_final(desc, md5_hash))
833 goto clear_hash;
834
835 tcp_put_md5sig_pool();
836 return 0;
837
838clear_hash:
839 tcp_put_md5sig_pool();
840clear_hash_noput:
841 memset(md5_hash, 0, 16);
842 return 1;
843}
844
845static int tcp_v6_inbound_md5_hash (struct sock *sk, struct sk_buff *skb)
846{
847 __u8 *hash_location = NULL;
848 struct tcp_md5sig_key *hash_expected;
849 const struct ipv6hdr *ip6h = ipv6_hdr(skb);
850 struct tcphdr *th = tcp_hdr(skb);
851 int genhash;
852 u8 newhash[16];
853
854 hash_expected = tcp_v6_md5_do_lookup(sk, &ip6h->saddr);
855 hash_location = tcp_parse_md5sig_option(th);
856
857 /* We've parsed the options - do we have a hash? */
858 if (!hash_expected && !hash_location)
859 return 0;
860
861 if (hash_expected && !hash_location) {
862 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
863 return 1;
864 }
865
866 if (!hash_expected && hash_location) {
867 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
868 return 1;
869 }
870
871 /* check the signature */
872 genhash = tcp_v6_md5_hash_skb(newhash,
873 hash_expected,
874 NULL, NULL, skb);
875
876 if (genhash || memcmp(hash_location, newhash, 16) != 0) {
877 if (net_ratelimit()) {
878 printk(KERN_INFO "MD5 Hash %s for [%pI6c]:%u->[%pI6c]:%u\n",
879 genhash ? "failed" : "mismatch",
880 &ip6h->saddr, ntohs(th->source),
881 &ip6h->daddr, ntohs(th->dest));
882 }
883 return 1;
884 }
885 return 0;
886}
887#endif
888
889struct request_sock_ops tcp6_request_sock_ops __read_mostly = {
890 .family = AF_INET6,
891 .obj_size = sizeof(struct tcp6_request_sock),
892 .rtx_syn_ack = tcp_v6_rtx_synack,
893 .send_ack = tcp_v6_reqsk_send_ack,
894 .destructor = tcp_v6_reqsk_destructor,
895 .send_reset = tcp_v6_send_reset,
896 .syn_ack_timeout = tcp_syn_ack_timeout,
897};
898
899#ifdef CONFIG_TCP_MD5SIG
900static const struct tcp_request_sock_ops tcp_request_sock_ipv6_ops = {
901 .md5_lookup = tcp_v6_reqsk_md5_lookup,
902 .calc_md5_hash = tcp_v6_md5_hash_skb,
903};
904#endif
905
906static void __tcp_v6_send_check(struct sk_buff *skb,
907 const struct in6_addr *saddr, const struct in6_addr *daddr)
908{
909 struct tcphdr *th = tcp_hdr(skb);
910
911 if (skb->ip_summed == CHECKSUM_PARTIAL) {
912 th->check = ~tcp_v6_check(skb->len, saddr, daddr, 0);
913 skb->csum_start = skb_transport_header(skb) - skb->head;
914 skb->csum_offset = offsetof(struct tcphdr, check);
915 } else {
916 th->check = tcp_v6_check(skb->len, saddr, daddr,
917 csum_partial(th, th->doff << 2,
918 skb->csum));
919 }
920}
921
922static void tcp_v6_send_check(struct sock *sk, struct sk_buff *skb)
923{
924 struct ipv6_pinfo *np = inet6_sk(sk);
925
926 __tcp_v6_send_check(skb, &np->saddr, &np->daddr);
927}
928
929static int tcp_v6_gso_send_check(struct sk_buff *skb)
930{
931 const struct ipv6hdr *ipv6h;
932 struct tcphdr *th;
933
934 if (!pskb_may_pull(skb, sizeof(*th)))
935 return -EINVAL;
936
937 ipv6h = ipv6_hdr(skb);
938 th = tcp_hdr(skb);
939
940 th->check = 0;
941 skb->ip_summed = CHECKSUM_PARTIAL;
942 __tcp_v6_send_check(skb, &ipv6h->saddr, &ipv6h->daddr);
943 return 0;
944}
945
946static struct sk_buff **tcp6_gro_receive(struct sk_buff **head,
947 struct sk_buff *skb)
948{
949 const struct ipv6hdr *iph = skb_gro_network_header(skb);
950
951 switch (skb->ip_summed) {
952 case CHECKSUM_COMPLETE:
953 if (!tcp_v6_check(skb_gro_len(skb), &iph->saddr, &iph->daddr,
954 skb->csum)) {
955 skb->ip_summed = CHECKSUM_UNNECESSARY;
956 break;
957 }
958
959 /* fall through */
960 case CHECKSUM_NONE:
961 NAPI_GRO_CB(skb)->flush = 1;
962 return NULL;
963 }
964
965 return tcp_gro_receive(head, skb);
966}
967
968static int tcp6_gro_complete(struct sk_buff *skb)
969{
970 const struct ipv6hdr *iph = ipv6_hdr(skb);
971 struct tcphdr *th = tcp_hdr(skb);
972
973 th->check = ~tcp_v6_check(skb->len - skb_transport_offset(skb),
974 &iph->saddr, &iph->daddr, 0);
975 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
976
977 return tcp_gro_complete(skb);
978}
979
980static void tcp_v6_send_response(struct sk_buff *skb, u32 seq, u32 ack, u32 win,
981 u32 ts, struct tcp_md5sig_key *key, int rst)
982{
983 struct tcphdr *th = tcp_hdr(skb), *t1;
984 struct sk_buff *buff;
985 struct flowi6 fl6;
986 struct net *net = dev_net(skb_dst(skb)->dev);
987 struct sock *ctl_sk = net->ipv6.tcp_sk;
988 unsigned int tot_len = sizeof(struct tcphdr);
989 struct dst_entry *dst;
990 __be32 *topt;
991
992 if (ts)
993 tot_len += TCPOLEN_TSTAMP_ALIGNED;
994#ifdef CONFIG_TCP_MD5SIG
995 if (key)
996 tot_len += TCPOLEN_MD5SIG_ALIGNED;
997#endif
998
999 buff = alloc_skb(MAX_HEADER + sizeof(struct ipv6hdr) + tot_len,
1000 GFP_ATOMIC);
1001 if (buff == NULL)
1002 return;
1003
1004 skb_reserve(buff, MAX_HEADER + sizeof(struct ipv6hdr) + tot_len);
1005
1006 t1 = (struct tcphdr *) skb_push(buff, tot_len);
1007 skb_reset_transport_header(buff);
1008
1009 /* Swap the send and the receive. */
1010 memset(t1, 0, sizeof(*t1));
1011 t1->dest = th->source;
1012 t1->source = th->dest;
1013 t1->doff = tot_len / 4;
1014 t1->seq = htonl(seq);
1015 t1->ack_seq = htonl(ack);
1016 t1->ack = !rst || !th->ack;
1017 t1->rst = rst;
1018 t1->window = htons(win);
1019
1020 topt = (__be32 *)(t1 + 1);
1021
1022 if (ts) {
1023 *topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
1024 (TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP);
1025 *topt++ = htonl(tcp_time_stamp);
1026 *topt++ = htonl(ts);
1027 }
1028
1029#ifdef CONFIG_TCP_MD5SIG
1030 if (key) {
1031 *topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
1032 (TCPOPT_MD5SIG << 8) | TCPOLEN_MD5SIG);
1033 tcp_v6_md5_hash_hdr((__u8 *)topt, key,
1034 &ipv6_hdr(skb)->saddr,
1035 &ipv6_hdr(skb)->daddr, t1);
1036 }
1037#endif
1038
1039 memset(&fl6, 0, sizeof(fl6));
1040 ipv6_addr_copy(&fl6.daddr, &ipv6_hdr(skb)->saddr);
1041 ipv6_addr_copy(&fl6.saddr, &ipv6_hdr(skb)->daddr);
1042
1043 buff->ip_summed = CHECKSUM_PARTIAL;
1044 buff->csum = 0;
1045
1046 __tcp_v6_send_check(buff, &fl6.saddr, &fl6.daddr);
1047
1048 fl6.flowi6_proto = IPPROTO_TCP;
1049 fl6.flowi6_oif = inet6_iif(skb);
1050 fl6.fl6_dport = t1->dest;
1051 fl6.fl6_sport = t1->source;
1052 security_skb_classify_flow(skb, flowi6_to_flowi(&fl6));
1053
1054 /* Pass a socket to ip6_dst_lookup either it is for RST
1055 * Underlying function will use this to retrieve the network
1056 * namespace
1057 */
1058 dst = ip6_dst_lookup_flow(ctl_sk, &fl6, NULL, false);
1059 if (!IS_ERR(dst)) {
1060 skb_dst_set(buff, dst);
1061 ip6_xmit(ctl_sk, buff, &fl6, NULL);
1062 TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
1063 if (rst)
1064 TCP_INC_STATS_BH(net, TCP_MIB_OUTRSTS);
1065 return;
1066 }
1067
1068 kfree_skb(buff);
1069}
1070
1071static void tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb)
1072{
1073 struct tcphdr *th = tcp_hdr(skb);
1074 u32 seq = 0, ack_seq = 0;
1075 struct tcp_md5sig_key *key = NULL;
1076
1077 if (th->rst)
1078 return;
1079
1080 if (!ipv6_unicast_destination(skb))
1081 return;
1082
1083#ifdef CONFIG_TCP_MD5SIG
1084 if (sk)
1085 key = tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->daddr);
1086#endif
1087
1088 if (th->ack)
1089 seq = ntohl(th->ack_seq);
1090 else
1091 ack_seq = ntohl(th->seq) + th->syn + th->fin + skb->len -
1092 (th->doff << 2);
1093
1094 tcp_v6_send_response(skb, seq, ack_seq, 0, 0, key, 1);
1095}
1096
1097static void tcp_v6_send_ack(struct sk_buff *skb, u32 seq, u32 ack, u32 win, u32 ts,
1098 struct tcp_md5sig_key *key)
1099{
1100 tcp_v6_send_response(skb, seq, ack, win, ts, key, 0);
1101}
1102
1103static void tcp_v6_timewait_ack(struct sock *sk, struct sk_buff *skb)
1104{
1105 struct inet_timewait_sock *tw = inet_twsk(sk);
1106 struct tcp_timewait_sock *tcptw = tcp_twsk(sk);
1107
1108 tcp_v6_send_ack(skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
1109 tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
1110 tcptw->tw_ts_recent, tcp_twsk_md5_key(tcptw));
1111
1112 inet_twsk_put(tw);
1113}
1114
1115static void tcp_v6_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
1116 struct request_sock *req)
1117{
1118 tcp_v6_send_ack(skb, tcp_rsk(req)->snt_isn + 1, tcp_rsk(req)->rcv_isn + 1, req->rcv_wnd, req->ts_recent,
1119 tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->daddr));
1120}
1121
1122
1123static struct sock *tcp_v6_hnd_req(struct sock *sk,struct sk_buff *skb)
1124{
1125 struct request_sock *req, **prev;
1126 const struct tcphdr *th = tcp_hdr(skb);
1127 struct sock *nsk;
1128
1129 /* Find possible connection requests. */
1130 req = inet6_csk_search_req(sk, &prev, th->source,
1131 &ipv6_hdr(skb)->saddr,
1132 &ipv6_hdr(skb)->daddr, inet6_iif(skb));
1133 if (req)
1134 return tcp_check_req(sk, skb, req, prev);
1135
1136 nsk = __inet6_lookup_established(sock_net(sk), &tcp_hashinfo,
1137 &ipv6_hdr(skb)->saddr, th->source,
1138 &ipv6_hdr(skb)->daddr, ntohs(th->dest), inet6_iif(skb));
1139
1140 if (nsk) {
1141 if (nsk->sk_state != TCP_TIME_WAIT) {
1142 bh_lock_sock(nsk);
1143 return nsk;
1144 }
1145 inet_twsk_put(inet_twsk(nsk));
1146 return NULL;
1147 }
1148
1149#ifdef CONFIG_SYN_COOKIES
1150 if (!th->syn)
1151 sk = cookie_v6_check(sk, skb);
1152#endif
1153 return sk;
1154}
1155
1156/* FIXME: this is substantially similar to the ipv4 code.
1157 * Can some kind of merge be done? -- erics
1158 */
1159static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
1160{
1161 struct tcp_extend_values tmp_ext;
1162 struct tcp_options_received tmp_opt;
1163 u8 *hash_location;
1164 struct request_sock *req;
1165 struct inet6_request_sock *treq;
1166 struct ipv6_pinfo *np = inet6_sk(sk);
1167 struct tcp_sock *tp = tcp_sk(sk);
1168 __u32 isn = TCP_SKB_CB(skb)->when;
1169 struct dst_entry *dst = NULL;
1170 int want_cookie = 0;
1171
1172 if (skb->protocol == htons(ETH_P_IP))
1173 return tcp_v4_conn_request(sk, skb);
1174
1175 if (!ipv6_unicast_destination(skb))
1176 goto drop;
1177
1178 if (inet_csk_reqsk_queue_is_full(sk) && !isn) {
1179 want_cookie = tcp_syn_flood_action(sk, skb, "TCPv6");
1180 if (!want_cookie)
1181 goto drop;
1182 }
1183
1184 if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1)
1185 goto drop;
1186
1187 req = inet6_reqsk_alloc(&tcp6_request_sock_ops);
1188 if (req == NULL)
1189 goto drop;
1190
1191#ifdef CONFIG_TCP_MD5SIG
1192 tcp_rsk(req)->af_specific = &tcp_request_sock_ipv6_ops;
1193#endif
1194
1195 tcp_clear_options(&tmp_opt);
1196 tmp_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);
1197 tmp_opt.user_mss = tp->rx_opt.user_mss;
1198 tcp_parse_options(skb, &tmp_opt, &hash_location, 0);
1199
1200 if (tmp_opt.cookie_plus > 0 &&
1201 tmp_opt.saw_tstamp &&
1202 !tp->rx_opt.cookie_out_never &&
1203 (sysctl_tcp_cookie_size > 0 ||
1204 (tp->cookie_values != NULL &&
1205 tp->cookie_values->cookie_desired > 0))) {
1206 u8 *c;
1207 u32 *d;
1208 u32 *mess = &tmp_ext.cookie_bakery[COOKIE_DIGEST_WORDS];
1209 int l = tmp_opt.cookie_plus - TCPOLEN_COOKIE_BASE;
1210
1211 if (tcp_cookie_generator(&tmp_ext.cookie_bakery[0]) != 0)
1212 goto drop_and_free;
1213
1214 /* Secret recipe starts with IP addresses */
1215 d = (__force u32 *)&ipv6_hdr(skb)->daddr.s6_addr32[0];
1216 *mess++ ^= *d++;
1217 *mess++ ^= *d++;
1218 *mess++ ^= *d++;
1219 *mess++ ^= *d++;
1220 d = (__force u32 *)&ipv6_hdr(skb)->saddr.s6_addr32[0];
1221 *mess++ ^= *d++;
1222 *mess++ ^= *d++;
1223 *mess++ ^= *d++;
1224 *mess++ ^= *d++;
1225
1226 /* plus variable length Initiator Cookie */
1227 c = (u8 *)mess;
1228 while (l-- > 0)
1229 *c++ ^= *hash_location++;
1230
1231 want_cookie = 0; /* not our kind of cookie */
1232 tmp_ext.cookie_out_never = 0; /* false */
1233 tmp_ext.cookie_plus = tmp_opt.cookie_plus;
1234 } else if (!tp->rx_opt.cookie_in_always) {
1235 /* redundant indications, but ensure initialization. */
1236 tmp_ext.cookie_out_never = 1; /* true */
1237 tmp_ext.cookie_plus = 0;
1238 } else {
1239 goto drop_and_free;
1240 }
1241 tmp_ext.cookie_in_always = tp->rx_opt.cookie_in_always;
1242
1243 if (want_cookie && !tmp_opt.saw_tstamp)
1244 tcp_clear_options(&tmp_opt);
1245
1246 tmp_opt.tstamp_ok = tmp_opt.saw_tstamp;
1247 tcp_openreq_init(req, &tmp_opt, skb);
1248
1249 treq = inet6_rsk(req);
1250 ipv6_addr_copy(&treq->rmt_addr, &ipv6_hdr(skb)->saddr);
1251 ipv6_addr_copy(&treq->loc_addr, &ipv6_hdr(skb)->daddr);
1252 if (!want_cookie || tmp_opt.tstamp_ok)
1253 TCP_ECN_create_request(req, tcp_hdr(skb));
1254
1255 if (!isn) {
1256 struct inet_peer *peer = NULL;
1257
1258 if (ipv6_opt_accepted(sk, skb) ||
1259 np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo ||
1260 np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim) {
1261 atomic_inc(&skb->users);
1262 treq->pktopts = skb;
1263 }
1264 treq->iif = sk->sk_bound_dev_if;
1265
1266 /* So that link locals have meaning */
1267 if (!sk->sk_bound_dev_if &&
1268 ipv6_addr_type(&treq->rmt_addr) & IPV6_ADDR_LINKLOCAL)
1269 treq->iif = inet6_iif(skb);
1270
1271 if (want_cookie) {
1272 isn = cookie_v6_init_sequence(sk, skb, &req->mss);
1273 req->cookie_ts = tmp_opt.tstamp_ok;
1274 goto have_isn;
1275 }
1276
1277 /* VJ's idea. We save last timestamp seen
1278 * from the destination in peer table, when entering
1279 * state TIME-WAIT, and check against it before
1280 * accepting new connection request.
1281 *
1282 * If "isn" is not zero, this request hit alive
1283 * timewait bucket, so that all the necessary checks
1284 * are made in the function processing timewait state.
1285 */
1286 if (tmp_opt.saw_tstamp &&
1287 tcp_death_row.sysctl_tw_recycle &&
1288 (dst = inet6_csk_route_req(sk, req)) != NULL &&
1289 (peer = rt6_get_peer((struct rt6_info *)dst)) != NULL &&
1290 ipv6_addr_equal((struct in6_addr *)peer->daddr.addr.a6,
1291 &treq->rmt_addr)) {
1292 inet_peer_refcheck(peer);
1293 if ((u32)get_seconds() - peer->tcp_ts_stamp < TCP_PAWS_MSL &&
1294 (s32)(peer->tcp_ts - req->ts_recent) >
1295 TCP_PAWS_WINDOW) {
1296 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSPASSIVEREJECTED);
1297 goto drop_and_release;
1298 }
1299 }
1300 /* Kill the following clause, if you dislike this way. */
1301 else if (!sysctl_tcp_syncookies &&
1302 (sysctl_max_syn_backlog - inet_csk_reqsk_queue_len(sk) <
1303 (sysctl_max_syn_backlog >> 2)) &&
1304 (!peer || !peer->tcp_ts_stamp) &&
1305 (!dst || !dst_metric(dst, RTAX_RTT))) {
1306 /* Without syncookies last quarter of
1307 * backlog is filled with destinations,
1308 * proven to be alive.
1309 * It means that we continue to communicate
1310 * to destinations, already remembered
1311 * to the moment of synflood.
1312 */
1313 LIMIT_NETDEBUG(KERN_DEBUG "TCP: drop open request from %pI6/%u\n",
1314 &treq->rmt_addr, ntohs(tcp_hdr(skb)->source));
1315 goto drop_and_release;
1316 }
1317
1318 isn = tcp_v6_init_sequence(skb);
1319 }
1320have_isn:
1321 tcp_rsk(req)->snt_isn = isn;
1322 tcp_rsk(req)->snt_synack = tcp_time_stamp;
1323
1324 security_inet_conn_request(sk, skb, req);
1325
1326 if (tcp_v6_send_synack(sk, req,
1327 (struct request_values *)&tmp_ext) ||
1328 want_cookie)
1329 goto drop_and_free;
1330
1331 inet6_csk_reqsk_queue_hash_add(sk, req, TCP_TIMEOUT_INIT);
1332 return 0;
1333
1334drop_and_release:
1335 dst_release(dst);
1336drop_and_free:
1337 reqsk_free(req);
1338drop:
1339 return 0; /* don't send reset */
1340}
1341
1342static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
1343 struct request_sock *req,
1344 struct dst_entry *dst)
1345{
1346 struct inet6_request_sock *treq;
1347 struct ipv6_pinfo *newnp, *np = inet6_sk(sk);
1348 struct tcp6_sock *newtcp6sk;
1349 struct inet_sock *newinet;
1350 struct tcp_sock *newtp;
1351 struct sock *newsk;
1352 struct ipv6_txoptions *opt;
1353#ifdef CONFIG_TCP_MD5SIG
1354 struct tcp_md5sig_key *key;
1355#endif
1356
1357 if (skb->protocol == htons(ETH_P_IP)) {
1358 /*
1359 * v6 mapped
1360 */
1361
1362 newsk = tcp_v4_syn_recv_sock(sk, skb, req, dst);
1363
1364 if (newsk == NULL)
1365 return NULL;
1366
1367 newtcp6sk = (struct tcp6_sock *)newsk;
1368 inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;
1369
1370 newinet = inet_sk(newsk);
1371 newnp = inet6_sk(newsk);
1372 newtp = tcp_sk(newsk);
1373
1374 memcpy(newnp, np, sizeof(struct ipv6_pinfo));
1375
1376 ipv6_addr_set_v4mapped(newinet->inet_daddr, &newnp->daddr);
1377
1378 ipv6_addr_set_v4mapped(newinet->inet_saddr, &newnp->saddr);
1379
1380 ipv6_addr_copy(&newnp->rcv_saddr, &newnp->saddr);
1381
1382 inet_csk(newsk)->icsk_af_ops = &ipv6_mapped;
1383 newsk->sk_backlog_rcv = tcp_v4_do_rcv;
1384#ifdef CONFIG_TCP_MD5SIG
1385 newtp->af_specific = &tcp_sock_ipv6_mapped_specific;
1386#endif
1387
1388 newnp->ipv6_ac_list = NULL;
1389 newnp->ipv6_fl_list = NULL;
1390 newnp->pktoptions = NULL;
1391 newnp->opt = NULL;
1392 newnp->mcast_oif = inet6_iif(skb);
1393 newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;
1394
1395 /*
1396 * No need to charge this sock to the relevant IPv6 refcnt debug socks count
1397 * here, tcp_create_openreq_child now does this for us, see the comment in
1398 * that function for the gory details. -acme
1399 */
1400
1401 /* It is tricky place. Until this moment IPv4 tcp
1402 worked with IPv6 icsk.icsk_af_ops.
1403 Sync it now.
1404 */
1405 tcp_sync_mss(newsk, inet_csk(newsk)->icsk_pmtu_cookie);
1406
1407 return newsk;
1408 }
1409
1410 treq = inet6_rsk(req);
1411 opt = np->opt;
1412
1413 if (sk_acceptq_is_full(sk))
1414 goto out_overflow;
1415
1416 if (!dst) {
1417 dst = inet6_csk_route_req(sk, req);
1418 if (!dst)
1419 goto out;
1420 }
1421
1422 newsk = tcp_create_openreq_child(sk, req, skb);
1423 if (newsk == NULL)
1424 goto out_nonewsk;
1425
1426 /*
1427 * No need to charge this sock to the relevant IPv6 refcnt debug socks
1428 * count here, tcp_create_openreq_child now does this for us, see the
1429 * comment in that function for the gory details. -acme
1430 */
1431
1432 newsk->sk_gso_type = SKB_GSO_TCPV6;
1433 __ip6_dst_store(newsk, dst, NULL, NULL);
1434
1435 newtcp6sk = (struct tcp6_sock *)newsk;
1436 inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;
1437
1438 newtp = tcp_sk(newsk);
1439 newinet = inet_sk(newsk);
1440 newnp = inet6_sk(newsk);
1441
1442 memcpy(newnp, np, sizeof(struct ipv6_pinfo));
1443
1444 ipv6_addr_copy(&newnp->daddr, &treq->rmt_addr);
1445 ipv6_addr_copy(&newnp->saddr, &treq->loc_addr);
1446 ipv6_addr_copy(&newnp->rcv_saddr, &treq->loc_addr);
1447 newsk->sk_bound_dev_if = treq->iif;
1448
1449 /* Now IPv6 options...
1450
1451 First: no IPv4 options.
1452 */
1453 newinet->inet_opt = NULL;
1454 newnp->ipv6_ac_list = NULL;
1455 newnp->ipv6_fl_list = NULL;
1456
1457 /* Clone RX bits */
1458 newnp->rxopt.all = np->rxopt.all;
1459
1460 /* Clone pktoptions received with SYN */
1461 newnp->pktoptions = NULL;
1462 if (treq->pktopts != NULL) {
1463 newnp->pktoptions = skb_clone(treq->pktopts, GFP_ATOMIC);
1464 kfree_skb(treq->pktopts);
1465 treq->pktopts = NULL;
1466 if (newnp->pktoptions)
1467 skb_set_owner_r(newnp->pktoptions, newsk);
1468 }
1469 newnp->opt = NULL;
1470 newnp->mcast_oif = inet6_iif(skb);
1471 newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;
1472
1473 /* Clone native IPv6 options from listening socket (if any)
1474
1475 Yes, keeping reference count would be much more clever,
1476 but we make one more one thing there: reattach optmem
1477 to newsk.
1478 */
1479 if (opt) {
1480 newnp->opt = ipv6_dup_options(newsk, opt);
1481 if (opt != np->opt)
1482 sock_kfree_s(sk, opt, opt->tot_len);
1483 }
1484
1485 inet_csk(newsk)->icsk_ext_hdr_len = 0;
1486 if (newnp->opt)
1487 inet_csk(newsk)->icsk_ext_hdr_len = (newnp->opt->opt_nflen +
1488 newnp->opt->opt_flen);
1489
1490 tcp_mtup_init(newsk);
1491 tcp_sync_mss(newsk, dst_mtu(dst));
1492 newtp->advmss = dst_metric_advmss(dst);
1493 tcp_initialize_rcv_mss(newsk);
1494 if (tcp_rsk(req)->snt_synack)
1495 tcp_valid_rtt_meas(newsk,
1496 tcp_time_stamp - tcp_rsk(req)->snt_synack);
1497 newtp->total_retrans = req->retrans;
1498
1499 newinet->inet_daddr = newinet->inet_saddr = LOOPBACK4_IPV6;
1500 newinet->inet_rcv_saddr = LOOPBACK4_IPV6;
1501
1502#ifdef CONFIG_TCP_MD5SIG
1503 /* Copy over the MD5 key from the original socket */
1504 if ((key = tcp_v6_md5_do_lookup(sk, &newnp->daddr)) != NULL) {
1505 /* We're using one, so create a matching key
1506 * on the newsk structure. If we fail to get
1507 * memory, then we end up not copying the key
1508 * across. Shucks.
1509 */
1510 char *newkey = kmemdup(key->key, key->keylen, GFP_ATOMIC);
1511 if (newkey != NULL)
1512 tcp_v6_md5_do_add(newsk, &newnp->daddr,
1513 newkey, key->keylen);
1514 }
1515#endif
1516
1517 if (__inet_inherit_port(sk, newsk) < 0) {
1518 sock_put(newsk);
1519 goto out;
1520 }
1521 __inet6_hash(newsk, NULL);
1522
1523 return newsk;
1524
1525out_overflow:
1526 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
1527out_nonewsk:
1528 if (opt && opt != np->opt)
1529 sock_kfree_s(sk, opt, opt->tot_len);
1530 dst_release(dst);
1531out:
1532 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
1533 return NULL;
1534}
1535
1536static __sum16 tcp_v6_checksum_init(struct sk_buff *skb)
1537{
1538 if (skb->ip_summed == CHECKSUM_COMPLETE) {
1539 if (!tcp_v6_check(skb->len, &ipv6_hdr(skb)->saddr,
1540 &ipv6_hdr(skb)->daddr, skb->csum)) {
1541 skb->ip_summed = CHECKSUM_UNNECESSARY;
1542 return 0;
1543 }
1544 }
1545
1546 skb->csum = ~csum_unfold(tcp_v6_check(skb->len,
1547 &ipv6_hdr(skb)->saddr,
1548 &ipv6_hdr(skb)->daddr, 0));
1549
1550 if (skb->len <= 76) {
1551 return __skb_checksum_complete(skb);
1552 }
1553 return 0;
1554}
1555
1556/* The socket must have it's spinlock held when we get
1557 * here.
1558 *
1559 * We have a potential double-lock case here, so even when
1560 * doing backlog processing we use the BH locking scheme.
1561 * This is because we cannot sleep with the original spinlock
1562 * held.
1563 */
1564static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
1565{
1566 struct ipv6_pinfo *np = inet6_sk(sk);
1567 struct tcp_sock *tp;
1568 struct sk_buff *opt_skb = NULL;
1569
1570 /* Imagine: socket is IPv6. IPv4 packet arrives,
1571 goes to IPv4 receive handler and backlogged.
1572 From backlog it always goes here. Kerboom...
1573 Fortunately, tcp_rcv_established and rcv_established
1574 handle them correctly, but it is not case with
1575 tcp_v6_hnd_req and tcp_v6_send_reset(). --ANK
1576 */
1577
1578 if (skb->protocol == htons(ETH_P_IP))
1579 return tcp_v4_do_rcv(sk, skb);
1580
1581#ifdef CONFIG_TCP_MD5SIG
1582 if (tcp_v6_inbound_md5_hash (sk, skb))
1583 goto discard;
1584#endif
1585
1586 if (sk_filter(sk, skb))
1587 goto discard;
1588
1589 /*
1590 * socket locking is here for SMP purposes as backlog rcv
1591 * is currently called with bh processing disabled.
1592 */
1593
1594 /* Do Stevens' IPV6_PKTOPTIONS.
1595
1596 Yes, guys, it is the only place in our code, where we
1597 may make it not affecting IPv4.
1598 The rest of code is protocol independent,
1599 and I do not like idea to uglify IPv4.
1600
1601 Actually, all the idea behind IPV6_PKTOPTIONS
1602 looks not very well thought. For now we latch
1603 options, received in the last packet, enqueued
1604 by tcp. Feel free to propose better solution.
1605 --ANK (980728)
1606 */
1607 if (np->rxopt.all)
1608 opt_skb = skb_clone(skb, GFP_ATOMIC);
1609
1610 if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
1611 sock_rps_save_rxhash(sk, skb->rxhash);
1612 if (tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len))
1613 goto reset;
1614 if (opt_skb)
1615 goto ipv6_pktoptions;
1616 return 0;
1617 }
1618
1619 if (skb->len < tcp_hdrlen(skb) || tcp_checksum_complete(skb))
1620 goto csum_err;
1621
1622 if (sk->sk_state == TCP_LISTEN) {
1623 struct sock *nsk = tcp_v6_hnd_req(sk, skb);
1624 if (!nsk)
1625 goto discard;
1626
1627 /*
1628 * Queue it on the new socket if the new socket is active,
1629 * otherwise we just shortcircuit this and continue with
1630 * the new socket..
1631 */
1632 if(nsk != sk) {
1633 sock_rps_save_rxhash(nsk, skb->rxhash);
1634 if (tcp_child_process(sk, nsk, skb))
1635 goto reset;
1636 if (opt_skb)
1637 __kfree_skb(opt_skb);
1638 return 0;
1639 }
1640 } else
1641 sock_rps_save_rxhash(sk, skb->rxhash);
1642
1643 if (tcp_rcv_state_process(sk, skb, tcp_hdr(skb), skb->len))
1644 goto reset;
1645 if (opt_skb)
1646 goto ipv6_pktoptions;
1647 return 0;
1648
1649reset:
1650 tcp_v6_send_reset(sk, skb);
1651discard:
1652 if (opt_skb)
1653 __kfree_skb(opt_skb);
1654 kfree_skb(skb);
1655 return 0;
1656csum_err:
1657 TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS);
1658 goto discard;
1659
1660
1661ipv6_pktoptions:
1662 /* Do you ask, what is it?
1663
1664 1. skb was enqueued by tcp.
1665 2. skb is added to tail of read queue, rather than out of order.
1666 3. socket is not in passive state.
1667 4. Finally, it really contains options, which user wants to receive.
1668 */
1669 tp = tcp_sk(sk);
1670 if (TCP_SKB_CB(opt_skb)->end_seq == tp->rcv_nxt &&
1671 !((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))) {
1672 if (np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo)
1673 np->mcast_oif = inet6_iif(opt_skb);
1674 if (np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim)
1675 np->mcast_hops = ipv6_hdr(opt_skb)->hop_limit;
1676 if (ipv6_opt_accepted(sk, opt_skb)) {
1677 skb_set_owner_r(opt_skb, sk);
1678 opt_skb = xchg(&np->pktoptions, opt_skb);
1679 } else {
1680 __kfree_skb(opt_skb);
1681 opt_skb = xchg(&np->pktoptions, NULL);
1682 }
1683 }
1684
1685 kfree_skb(opt_skb);
1686 return 0;
1687}
1688
1689static int tcp_v6_rcv(struct sk_buff *skb)
1690{
1691 struct tcphdr *th;
1692 const struct ipv6hdr *hdr;
1693 struct sock *sk;
1694 int ret;
1695 struct net *net = dev_net(skb->dev);
1696
1697 if (skb->pkt_type != PACKET_HOST)
1698 goto discard_it;
1699
1700 /*
1701 * Count it even if it's bad.
1702 */
1703 TCP_INC_STATS_BH(net, TCP_MIB_INSEGS);
1704
1705 if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
1706 goto discard_it;
1707
1708 th = tcp_hdr(skb);
1709
1710 if (th->doff < sizeof(struct tcphdr)/4)
1711 goto bad_packet;
1712 if (!pskb_may_pull(skb, th->doff*4))
1713 goto discard_it;
1714
1715 if (!skb_csum_unnecessary(skb) && tcp_v6_checksum_init(skb))
1716 goto bad_packet;
1717
1718 th = tcp_hdr(skb);
1719 hdr = ipv6_hdr(skb);
1720 TCP_SKB_CB(skb)->seq = ntohl(th->seq);
1721 TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
1722 skb->len - th->doff*4);
1723 TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
1724 TCP_SKB_CB(skb)->when = 0;
1725 TCP_SKB_CB(skb)->flags = ipv6_get_dsfield(hdr);
1726 TCP_SKB_CB(skb)->sacked = 0;
1727
1728 sk = __inet6_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
1729 if (!sk)
1730 goto no_tcp_socket;
1731
1732process:
1733 if (sk->sk_state == TCP_TIME_WAIT)
1734 goto do_time_wait;
1735
1736 if (hdr->hop_limit < inet6_sk(sk)->min_hopcount) {
1737 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
1738 goto discard_and_relse;
1739 }
1740
1741 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
1742 goto discard_and_relse;
1743
1744 if (sk_filter(sk, skb))
1745 goto discard_and_relse;
1746
1747 skb->dev = NULL;
1748
1749 bh_lock_sock_nested(sk);
1750 ret = 0;
1751 if (!sock_owned_by_user(sk)) {
1752#ifdef CONFIG_NET_DMA
1753 struct tcp_sock *tp = tcp_sk(sk);
1754 if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
1755 tp->ucopy.dma_chan = dma_find_channel(DMA_MEMCPY);
1756 if (tp->ucopy.dma_chan)
1757 ret = tcp_v6_do_rcv(sk, skb);
1758 else
1759#endif
1760 {
1761 if (!tcp_prequeue(sk, skb))
1762 ret = tcp_v6_do_rcv(sk, skb);
1763 }
1764 } else if (unlikely(sk_add_backlog(sk, skb))) {
1765 bh_unlock_sock(sk);
1766 NET_INC_STATS_BH(net, LINUX_MIB_TCPBACKLOGDROP);
1767 goto discard_and_relse;
1768 }
1769 bh_unlock_sock(sk);
1770
1771 sock_put(sk);
1772 return ret ? -1 : 0;
1773
1774no_tcp_socket:
1775 if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
1776 goto discard_it;
1777
1778 if (skb->len < (th->doff<<2) || tcp_checksum_complete(skb)) {
1779bad_packet:
1780 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
1781 } else {
1782 tcp_v6_send_reset(NULL, skb);
1783 }
1784
1785discard_it:
1786
1787 /*
1788 * Discard frame
1789 */
1790
1791 kfree_skb(skb);
1792 return 0;
1793
1794discard_and_relse:
1795 sock_put(sk);
1796 goto discard_it;
1797
1798do_time_wait:
1799 if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) {
1800 inet_twsk_put(inet_twsk(sk));
1801 goto discard_it;
1802 }
1803
1804 if (skb->len < (th->doff<<2) || tcp_checksum_complete(skb)) {
1805 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
1806 inet_twsk_put(inet_twsk(sk));
1807 goto discard_it;
1808 }
1809
1810 switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
1811 case TCP_TW_SYN:
1812 {
1813 struct sock *sk2;
1814
1815 sk2 = inet6_lookup_listener(dev_net(skb->dev), &tcp_hashinfo,
1816 &ipv6_hdr(skb)->daddr,
1817 ntohs(th->dest), inet6_iif(skb));
1818 if (sk2 != NULL) {
1819 struct inet_timewait_sock *tw = inet_twsk(sk);
1820 inet_twsk_deschedule(tw, &tcp_death_row);
1821 inet_twsk_put(tw);
1822 sk = sk2;
1823 goto process;
1824 }
1825 /* Fall through to ACK */
1826 }
1827 case TCP_TW_ACK:
1828 tcp_v6_timewait_ack(sk, skb);
1829 break;
1830 case TCP_TW_RST:
1831 goto no_tcp_socket;
1832 case TCP_TW_SUCCESS:;
1833 }
1834 goto discard_it;
1835}
1836
1837static struct inet_peer *tcp_v6_get_peer(struct sock *sk, bool *release_it)
1838{
1839 struct rt6_info *rt = (struct rt6_info *) __sk_dst_get(sk);
1840 struct ipv6_pinfo *np = inet6_sk(sk);
1841 struct inet_peer *peer;
1842
1843 if (!rt ||
1844 !ipv6_addr_equal(&np->daddr, &rt->rt6i_dst.addr)) {
1845 peer = inet_getpeer_v6(&np->daddr, 1);
1846 *release_it = true;
1847 } else {
1848 if (!rt->rt6i_peer)
1849 rt6_bind_peer(rt, 1);
1850 peer = rt->rt6i_peer;
1851 *release_it = false;
1852 }
1853
1854 return peer;
1855}
1856
1857static void *tcp_v6_tw_get_peer(struct sock *sk)
1858{
1859 struct inet6_timewait_sock *tw6 = inet6_twsk(sk);
1860 struct inet_timewait_sock *tw = inet_twsk(sk);
1861
1862 if (tw->tw_family == AF_INET)
1863 return tcp_v4_tw_get_peer(sk);
1864
1865 return inet_getpeer_v6(&tw6->tw_v6_daddr, 1);
1866}
1867
1868static struct timewait_sock_ops tcp6_timewait_sock_ops = {
1869 .twsk_obj_size = sizeof(struct tcp6_timewait_sock),
1870 .twsk_unique = tcp_twsk_unique,
1871 .twsk_destructor= tcp_twsk_destructor,
1872 .twsk_getpeer = tcp_v6_tw_get_peer,
1873};
1874
1875static const struct inet_connection_sock_af_ops ipv6_specific = {
1876 .queue_xmit = inet6_csk_xmit,
1877 .send_check = tcp_v6_send_check,
1878 .rebuild_header = inet6_sk_rebuild_header,
1879 .conn_request = tcp_v6_conn_request,
1880 .syn_recv_sock = tcp_v6_syn_recv_sock,
1881 .get_peer = tcp_v6_get_peer,
1882 .net_header_len = sizeof(struct ipv6hdr),
1883 .setsockopt = ipv6_setsockopt,
1884 .getsockopt = ipv6_getsockopt,
1885 .addr2sockaddr = inet6_csk_addr2sockaddr,
1886 .sockaddr_len = sizeof(struct sockaddr_in6),
1887 .bind_conflict = inet6_csk_bind_conflict,
1888#ifdef CONFIG_COMPAT
1889 .compat_setsockopt = compat_ipv6_setsockopt,
1890 .compat_getsockopt = compat_ipv6_getsockopt,
1891#endif
1892};
1893
1894#ifdef CONFIG_TCP_MD5SIG
1895static const struct tcp_sock_af_ops tcp_sock_ipv6_specific = {
1896 .md5_lookup = tcp_v6_md5_lookup,
1897 .calc_md5_hash = tcp_v6_md5_hash_skb,
1898 .md5_add = tcp_v6_md5_add_func,
1899 .md5_parse = tcp_v6_parse_md5_keys,
1900};
1901#endif
1902
1903/*
1904 * TCP over IPv4 via INET6 API
1905 */
1906
1907static const struct inet_connection_sock_af_ops ipv6_mapped = {
1908 .queue_xmit = ip_queue_xmit,
1909 .send_check = tcp_v4_send_check,
1910 .rebuild_header = inet_sk_rebuild_header,
1911 .conn_request = tcp_v6_conn_request,
1912 .syn_recv_sock = tcp_v6_syn_recv_sock,
1913 .get_peer = tcp_v4_get_peer,
1914 .net_header_len = sizeof(struct iphdr),
1915 .setsockopt = ipv6_setsockopt,
1916 .getsockopt = ipv6_getsockopt,
1917 .addr2sockaddr = inet6_csk_addr2sockaddr,
1918 .sockaddr_len = sizeof(struct sockaddr_in6),
1919 .bind_conflict = inet6_csk_bind_conflict,
1920#ifdef CONFIG_COMPAT
1921 .compat_setsockopt = compat_ipv6_setsockopt,
1922 .compat_getsockopt = compat_ipv6_getsockopt,
1923#endif
1924};
1925
1926#ifdef CONFIG_TCP_MD5SIG
1927static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific = {
1928 .md5_lookup = tcp_v4_md5_lookup,
1929 .calc_md5_hash = tcp_v4_md5_hash_skb,
1930 .md5_add = tcp_v6_md5_add_func,
1931 .md5_parse = tcp_v6_parse_md5_keys,
1932};
1933#endif
1934
1935/* NOTE: A lot of things set to zero explicitly by call to
1936 * sk_alloc() so need not be done here.
1937 */
1938static int tcp_v6_init_sock(struct sock *sk)
1939{
1940 struct inet_connection_sock *icsk = inet_csk(sk);
1941 struct tcp_sock *tp = tcp_sk(sk);
1942
1943 skb_queue_head_init(&tp->out_of_order_queue);
1944 tcp_init_xmit_timers(sk);
1945 tcp_prequeue_init(tp);
1946
1947 icsk->icsk_rto = TCP_TIMEOUT_INIT;
1948 tp->mdev = TCP_TIMEOUT_INIT;
1949
1950 /* So many TCP implementations out there (incorrectly) count the
1951 * initial SYN frame in their delayed-ACK and congestion control
1952 * algorithms that we must have the following bandaid to talk
1953 * efficiently to them. -DaveM
1954 */
1955 tp->snd_cwnd = 2;
1956
1957 /* See draft-stevens-tcpca-spec-01 for discussion of the
1958 * initialization of these values.
1959 */
1960 tp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
1961 tp->snd_cwnd_clamp = ~0;
1962 tp->mss_cache = TCP_MSS_DEFAULT;
1963
1964 tp->reordering = sysctl_tcp_reordering;
1965
1966 sk->sk_state = TCP_CLOSE;
1967
1968 icsk->icsk_af_ops = &ipv6_specific;
1969 icsk->icsk_ca_ops = &tcp_init_congestion_ops;
1970 icsk->icsk_sync_mss = tcp_sync_mss;
1971 sk->sk_write_space = sk_stream_write_space;
1972 sock_set_flag(sk, SOCK_USE_WRITE_QUEUE);
1973
1974#ifdef CONFIG_TCP_MD5SIG
1975 tp->af_specific = &tcp_sock_ipv6_specific;
1976#endif
1977
1978 /* TCP Cookie Transactions */
1979 if (sysctl_tcp_cookie_size > 0) {
1980 /* Default, cookies without s_data_payload. */
1981 tp->cookie_values =
1982 kzalloc(sizeof(*tp->cookie_values),
1983 sk->sk_allocation);
1984 if (tp->cookie_values != NULL)
1985 kref_init(&tp->cookie_values->kref);
1986 }
1987 /* Presumed zeroed, in order of appearance:
1988 * cookie_in_always, cookie_out_never,
1989 * s_data_constant, s_data_in, s_data_out
1990 */
1991 sk->sk_sndbuf = sysctl_tcp_wmem[1];
1992 sk->sk_rcvbuf = sysctl_tcp_rmem[1];
1993
1994 local_bh_disable();
1995 percpu_counter_inc(&tcp_sockets_allocated);
1996 local_bh_enable();
1997
1998 return 0;
1999}
2000
2001static void tcp_v6_destroy_sock(struct sock *sk)
2002{
2003#ifdef CONFIG_TCP_MD5SIG
2004 /* Clean up the MD5 key list */
2005 if (tcp_sk(sk)->md5sig_info)
2006 tcp_v6_clear_md5_list(sk);
2007#endif
2008 tcp_v4_destroy_sock(sk);
2009 inet6_destroy_sock(sk);
2010}
2011
2012#ifdef CONFIG_PROC_FS
2013/* Proc filesystem TCPv6 sock list dumping. */
2014static void get_openreq6(struct seq_file *seq,
2015 struct sock *sk, struct request_sock *req, int i, int uid)
2016{
2017 int ttd = req->expires - jiffies;
2018 const struct in6_addr *src = &inet6_rsk(req)->loc_addr;
2019 const struct in6_addr *dest = &inet6_rsk(req)->rmt_addr;
2020
2021 if (ttd < 0)
2022 ttd = 0;
2023
2024 seq_printf(seq,
2025 "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
2026 "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK\n",
2027 i,
2028 src->s6_addr32[0], src->s6_addr32[1],
2029 src->s6_addr32[2], src->s6_addr32[3],
2030 ntohs(inet_rsk(req)->loc_port),
2031 dest->s6_addr32[0], dest->s6_addr32[1],
2032 dest->s6_addr32[2], dest->s6_addr32[3],
2033 ntohs(inet_rsk(req)->rmt_port),
2034 TCP_SYN_RECV,
2035 0,0, /* could print option size, but that is af dependent. */
2036 1, /* timers active (only the expire timer) */
2037 jiffies_to_clock_t(ttd),
2038 req->retrans,
2039 uid,
2040 0, /* non standard timer */
2041 0, /* open_requests have no inode */
2042 0, req);
2043}
2044
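/* Dump one listening or established TCPv6 socket as a single line of
 * /proc/net/tcp6.
 */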
2045static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
2046{
2047 const struct in6_addr *dest, *src;
2048 __u16 destp, srcp;
2049 int timer_active;
2050 unsigned long timer_expires;
2051 struct inet_sock *inet = inet_sk(sp);
2052 struct tcp_sock *tp = tcp_sk(sp);
2053 const struct inet_connection_sock *icsk = inet_csk(sp);
2054 struct ipv6_pinfo *np = inet6_sk(sp);
2055
2056 dest = &np->daddr;
2057 src = &np->rcv_saddr;
2058 destp = ntohs(inet->inet_dport);
2059 srcp = ntohs(inet->inet_sport);
2060
2061 if (icsk->icsk_pending == ICSK_TIME_RETRANS) {
2062 timer_active = 1;
2063 timer_expires = icsk->icsk_timeout;
2064 } else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
2065 timer_active = 4;
2066 timer_expires = icsk->icsk_timeout;
2067 } else if (timer_pending(&sp->sk_timer)) {
2068 timer_active = 2;
2069 timer_expires = sp->sk_timer.expires;
2070 } else {
2071 timer_active = 0;
2072 timer_expires = jiffies;
2073 }
2074
2075 seq_printf(seq,
2076 "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
2077 "%02X %08X:%08X %02X:%08lX %08X %5d %8d %lu %d %pK %lu %lu %u %u %d\n",
2078 i,
2079 src->s6_addr32[0], src->s6_addr32[1],
2080 src->s6_addr32[2], src->s6_addr32[3], srcp,
2081 dest->s6_addr32[0], dest->s6_addr32[1],
2082 dest->s6_addr32[2], dest->s6_addr32[3], destp,
2083 sp->sk_state,
2084 tp->write_seq-tp->snd_una,
2085 (sp->sk_state == TCP_LISTEN) ? sp->sk_ack_backlog : (tp->rcv_nxt - tp->copied_seq),
2086 timer_active,
2087 jiffies_to_clock_t(timer_expires - jiffies),
2088 icsk->icsk_retransmits,
2089 sock_i_uid(sp),
2090 icsk->icsk_probes_out,
2091 sock_i_ino(sp),
2092 atomic_read(&sp->sk_refcnt), sp,
2093 jiffies_to_clock_t(icsk->icsk_rto),
2094 jiffies_to_clock_t(icsk->icsk_ack.ato),
2095 (icsk->icsk_ack.quick << 1 ) | icsk->icsk_ack.pingpong,
2096 tp->snd_cwnd,
2097 tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh
2098 );
2099}
2100
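/* Dump one TIME-WAIT socket as a single line of /proc/net/tcp6. */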
2101static void get_timewait6_sock(struct seq_file *seq,
2102 struct inet_timewait_sock *tw, int i)
2103{
2104 const struct in6_addr *dest, *src;
2105 __u16 destp, srcp;
2106 struct inet6_timewait_sock *tw6 = inet6_twsk((struct sock *)tw);
2107 int ttd = tw->tw_ttd - jiffies;
2108
2109 if (ttd < 0)
2110 ttd = 0;
2111
2112 dest = &tw6->tw_v6_daddr;
2113 src = &tw6->tw_v6_rcv_saddr;
2114 destp = ntohs(tw->tw_dport);
2115 srcp = ntohs(tw->tw_sport);
2116
2117 seq_printf(seq,
2118 "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
2119 "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK\n",
2120 i,
2121 src->s6_addr32[0], src->s6_addr32[1],
2122 src->s6_addr32[2], src->s6_addr32[3], srcp,
2123 dest->s6_addr32[0], dest->s6_addr32[1],
2124 dest->s6_addr32[2], dest->s6_addr32[3], destp,
2125 tw->tw_substate, 0, 0,
2126 3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
2127 atomic_read(&tw->tw_refcnt), tw);
2128}
2129
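/* seq_file ->show() callback: v is SEQ_START_TOKEN for the header line,
 * otherwise an open request, a full socket or a timewait socket,
 * depending on the iterator state.
 */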
2130static int tcp6_seq_show(struct seq_file *seq, void *v)
2131{
2132 struct tcp_iter_state *st;
2133
2134 if (v == SEQ_START_TOKEN) {
2135 seq_puts(seq,
2136 " sl "
2137 "local_address "
2138 "remote_address "
2139 "st tx_queue rx_queue tr tm->when retrnsmt"
2140 " uid timeout inode\n");
2141 goto out;
2142 }
2143 st = seq->private;
2144
2145 switch (st->state) {
2146 case TCP_SEQ_STATE_LISTENING:
2147 case TCP_SEQ_STATE_ESTABLISHED:
2148 get_tcp6_sock(seq, v, st->num);
2149 break;
2150 case TCP_SEQ_STATE_OPENREQ:
2151 get_openreq6(seq, st->syn_wait_sk, v, st->num, st->uid);
2152 break;
2153 case TCP_SEQ_STATE_TIME_WAIT:
2154 get_timewait6_sock(seq, v, st->num);
2155 break;
2156 }
2157out:
2158 return 0;
2159}
2160
2161static struct tcp_seq_afinfo tcp6_seq_afinfo = {
2162 .name = "tcp6",
2163 .family = AF_INET6,
2164 .seq_fops = {
2165 .owner = THIS_MODULE,
2166 },
2167 .seq_ops = {
2168 .show = tcp6_seq_show,
2169 },
2170};
2171
2172int __net_init tcp6_proc_init(struct net *net)
2173{
2174 return tcp_proc_register(net, &tcp6_seq_afinfo);
2175}
2176
2177void tcp6_proc_exit(struct net *net)
2178{
2179 tcp_proc_unregister(net, &tcp6_seq_afinfo);
2180}
2181#endif
2182
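/* Most handlers are shared with IPv4 TCP; the IPv6-specific pieces are
 * connect, init, destroy, backlog_rcv, hash and the tcp6_* socket,
 * request and timewait types.
 */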
2183struct proto tcpv6_prot = {
2184 .name = "TCPv6",
2185 .owner = THIS_MODULE,
2186 .close = tcp_close,
2187 .connect = tcp_v6_connect,
2188 .disconnect = tcp_disconnect,
2189 .accept = inet_csk_accept,
2190 .ioctl = tcp_ioctl,
2191 .init = tcp_v6_init_sock,
2192 .destroy = tcp_v6_destroy_sock,
2193 .shutdown = tcp_shutdown,
2194 .setsockopt = tcp_setsockopt,
2195 .getsockopt = tcp_getsockopt,
2196 .recvmsg = tcp_recvmsg,
2197 .sendmsg = tcp_sendmsg,
2198 .sendpage = tcp_sendpage,
2199 .backlog_rcv = tcp_v6_do_rcv,
2200 .hash = tcp_v6_hash,
2201 .unhash = inet_unhash,
2202 .get_port = inet_csk_get_port,
2203 .enter_memory_pressure = tcp_enter_memory_pressure,
2204 .sockets_allocated = &tcp_sockets_allocated,
2205 .memory_allocated = &tcp_memory_allocated,
2206 .memory_pressure = &tcp_memory_pressure,
2207 .orphan_count = &tcp_orphan_count,
2208 .sysctl_mem = sysctl_tcp_mem,
2209 .sysctl_wmem = sysctl_tcp_wmem,
2210 .sysctl_rmem = sysctl_tcp_rmem,
2211 .max_header = MAX_TCP_HEADER,
2212 .obj_size = sizeof(struct tcp6_sock),
2213 .slab_flags = SLAB_DESTROY_BY_RCU,
2214 .twsk_prot = &tcp6_timewait_sock_ops,
2215 .rsk_prot = &tcp6_request_sock_ops,
2216 .h.hashinfo = &tcp_hashinfo,
2217 .no_autobind = true,
2218#ifdef CONFIG_COMPAT
2219 .compat_setsockopt = compat_tcp_setsockopt,
2220 .compat_getsockopt = compat_tcp_getsockopt,
2221#endif
2222};
2223
2224static const struct inet6_protocol tcpv6_protocol = {
2225 .handler = tcp_v6_rcv,
2226 .err_handler = tcp_v6_err,
2227 .gso_send_check = tcp_v6_gso_send_check,
2228 .gso_segment = tcp_tso_segment,
2229 .gro_receive = tcp6_gro_receive,
2230 .gro_complete = tcp6_gro_complete,
2231 .flags = INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
2232};
2233
2234static struct inet_protosw tcpv6_protosw = {
2235 .type = SOCK_STREAM,
2236 .protocol = IPPROTO_TCP,
2237 .prot = &tcpv6_prot,
2238 .ops = &inet6_stream_ops,
2239 .no_check = 0,
2240 .flags = INET_PROTOSW_PERMANENT |
2241 INET_PROTOSW_ICSK,
2242};
2243
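/* Per-namespace control socket, used by tcp_v6_send_response() to emit
 * RSTs and ACKs on behalf of sockets we do not own.
 */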
2244static int __net_init tcpv6_net_init(struct net *net)
2245{
2246 return inet_ctl_sock_create(&net->ipv6.tcp_sk, PF_INET6,
2247 SOCK_RAW, IPPROTO_TCP, net);
2248}
2249
2250static void __net_exit tcpv6_net_exit(struct net *net)
2251{
2252 inet_ctl_sock_destroy(net->ipv6.tcp_sk);
2253}
2254
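/* Purge any IPv6 timewait sockets left behind in namespaces that are
 * being dismantled.
 */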
2255static void __net_exit tcpv6_net_exit_batch(struct list_head *net_exit_list)
2256{
2257 inet_twsk_purge(&tcp_hashinfo, &tcp_death_row, AF_INET6);
2258}
2259
2260static struct pernet_operations tcpv6_net_ops = {
2261 .init = tcpv6_net_init,
2262 .exit = tcpv6_net_exit,
2263 .exit_batch = tcpv6_net_exit_batch,
2264};
2265
2266int __init tcpv6_init(void)
2267{
2268 int ret;
2269
2270 ret = inet6_add_protocol(&tcpv6_protocol, IPPROTO_TCP);
2271 if (ret)
2272 goto out;
2273
2274	/* register the inet6 protosw */
2275 ret = inet6_register_protosw(&tcpv6_protosw);
2276 if (ret)
2277 goto out_tcpv6_protocol;
2278
2279 ret = register_pernet_subsys(&tcpv6_net_ops);
2280 if (ret)
2281 goto out_tcpv6_protosw;
2282out:
2283 return ret;
2284
2285out_tcpv6_protosw:
2286	inet6_unregister_protosw(&tcpv6_protosw);
2287out_tcpv6_protocol:
2288	inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
2289 goto out;
2290}
2291
2292void tcpv6_exit(void)
2293{
2294 unregister_pernet_subsys(&tcpv6_net_ops);
2295 inet6_unregister_protosw(&tcpv6_protosw);
2296 inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
2297}
1/*
2 * TCP over IPv6
3 * Linux INET6 implementation
4 *
5 * Authors:
6 * Pedro Roque <roque@di.fc.ul.pt>
7 *
8 * Based on:
9 * linux/net/ipv4/tcp.c
10 * linux/net/ipv4/tcp_input.c
11 * linux/net/ipv4/tcp_output.c
12 *
13 * Fixes:
14 * Hideaki YOSHIFUJI : sin6_scope_id support
15 * YOSHIFUJI Hideaki @USAGI and: Support IPV6_V6ONLY socket option, which
16 * Alexey Kuznetsov allow both IPv4 and IPv6 sockets to bind
17 * a single port at the same time.
18 * YOSHIFUJI Hideaki @USAGI: convert /proc/net/tcp6 to seq_file.
19 *
20 * This program is free software; you can redistribute it and/or
21 * modify it under the terms of the GNU General Public License
22 * as published by the Free Software Foundation; either version
23 * 2 of the License, or (at your option) any later version.
24 */
25
26#include <linux/bottom_half.h>
27#include <linux/module.h>
28#include <linux/errno.h>
29#include <linux/types.h>
30#include <linux/socket.h>
31#include <linux/sockios.h>
32#include <linux/net.h>
33#include <linux/jiffies.h>
34#include <linux/in.h>
35#include <linux/in6.h>
36#include <linux/netdevice.h>
37#include <linux/init.h>
38#include <linux/jhash.h>
39#include <linux/ipsec.h>
40#include <linux/times.h>
41#include <linux/slab.h>
42
43#include <linux/ipv6.h>
44#include <linux/icmpv6.h>
45#include <linux/random.h>
46
47#include <net/tcp.h>
48#include <net/ndisc.h>
49#include <net/inet6_hashtables.h>
50#include <net/inet6_connection_sock.h>
51#include <net/ipv6.h>
52#include <net/transp_v6.h>
53#include <net/addrconf.h>
54#include <net/ip6_route.h>
55#include <net/ip6_checksum.h>
56#include <net/inet_ecn.h>
57#include <net/protocol.h>
58#include <net/xfrm.h>
59#include <net/snmp.h>
60#include <net/dsfield.h>
61#include <net/timewait_sock.h>
62#include <net/netdma.h>
63#include <net/inet_common.h>
64#include <net/secure_seq.h>
65#include <net/tcp_memcontrol.h>
66
67#include <asm/uaccess.h>
68
69#include <linux/proc_fs.h>
70#include <linux/seq_file.h>
71
72#include <linux/crypto.h>
73#include <linux/scatterlist.h>
74
75static void tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb);
76static void tcp_v6_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
77 struct request_sock *req);
78
79static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb);
80static void __tcp_v6_send_check(struct sk_buff *skb,
81 const struct in6_addr *saddr,
82 const struct in6_addr *daddr);
83
84static const struct inet_connection_sock_af_ops ipv6_mapped;
85static const struct inet_connection_sock_af_ops ipv6_specific;
86#ifdef CONFIG_TCP_MD5SIG
87static const struct tcp_sock_af_ops tcp_sock_ipv6_specific;
88static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific;
89#else
90static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk,
91 const struct in6_addr *addr)
92{
93 return NULL;
94}
95#endif
96
97static void tcp_v6_hash(struct sock *sk)
98{
99 if (sk->sk_state != TCP_CLOSE) {
100 if (inet_csk(sk)->icsk_af_ops == &ipv6_mapped) {
101 tcp_prot.hash(sk);
102 return;
103 }
104 local_bh_disable();
105 __inet6_hash(sk, NULL);
106 local_bh_enable();
107 }
108}
109
110static __inline__ __sum16 tcp_v6_check(int len,
111 const struct in6_addr *saddr,
112 const struct in6_addr *daddr,
113 __wsum base)
114{
115 return csum_ipv6_magic(saddr, daddr, len, IPPROTO_TCP, base);
116}
117
118static __u32 tcp_v6_init_sequence(const struct sk_buff *skb)
119{
120 return secure_tcpv6_sequence_number(ipv6_hdr(skb)->daddr.s6_addr32,
121 ipv6_hdr(skb)->saddr.s6_addr32,
122 tcp_hdr(skb)->dest,
123 tcp_hdr(skb)->source);
124}
125
126static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
127 int addr_len)
128{
129 struct sockaddr_in6 *usin = (struct sockaddr_in6 *) uaddr;
130 struct inet_sock *inet = inet_sk(sk);
131 struct inet_connection_sock *icsk = inet_csk(sk);
132 struct ipv6_pinfo *np = inet6_sk(sk);
133 struct tcp_sock *tp = tcp_sk(sk);
134 struct in6_addr *saddr = NULL, *final_p, final;
135 struct rt6_info *rt;
136 struct flowi6 fl6;
137 struct dst_entry *dst;
138 int addr_type;
139 int err;
140
141 if (addr_len < SIN6_LEN_RFC2133)
142 return -EINVAL;
143
144 if (usin->sin6_family != AF_INET6)
145 return -EAFNOSUPPORT;
146
147 memset(&fl6, 0, sizeof(fl6));
148
149 if (np->sndflow) {
150 fl6.flowlabel = usin->sin6_flowinfo&IPV6_FLOWINFO_MASK;
151 IP6_ECN_flow_init(fl6.flowlabel);
152 if (fl6.flowlabel&IPV6_FLOWLABEL_MASK) {
153 struct ip6_flowlabel *flowlabel;
154 flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
155 if (flowlabel == NULL)
156 return -EINVAL;
157 usin->sin6_addr = flowlabel->dst;
158 fl6_sock_release(flowlabel);
159 }
160 }
161
162 /*
163 * connect() to INADDR_ANY means loopback (BSD'ism).
164 */
165
166 if(ipv6_addr_any(&usin->sin6_addr))
167 usin->sin6_addr.s6_addr[15] = 0x1;
168
169 addr_type = ipv6_addr_type(&usin->sin6_addr);
170
171 if(addr_type & IPV6_ADDR_MULTICAST)
172 return -ENETUNREACH;
173
174 if (addr_type&IPV6_ADDR_LINKLOCAL) {
175 if (addr_len >= sizeof(struct sockaddr_in6) &&
176 usin->sin6_scope_id) {
177 /* If interface is set while binding, indices
178 * must coincide.
179 */
180 if (sk->sk_bound_dev_if &&
181 sk->sk_bound_dev_if != usin->sin6_scope_id)
182 return -EINVAL;
183
184 sk->sk_bound_dev_if = usin->sin6_scope_id;
185 }
186
187 /* Connect to link-local address requires an interface */
188 if (!sk->sk_bound_dev_if)
189 return -EINVAL;
190 }
191
192 if (tp->rx_opt.ts_recent_stamp &&
193 !ipv6_addr_equal(&np->daddr, &usin->sin6_addr)) {
194 tp->rx_opt.ts_recent = 0;
195 tp->rx_opt.ts_recent_stamp = 0;
196 tp->write_seq = 0;
197 }
198
199 np->daddr = usin->sin6_addr;
200 np->flow_label = fl6.flowlabel;
201
202 /*
203 * TCP over IPv4
204 */
205
206 if (addr_type == IPV6_ADDR_MAPPED) {
207 u32 exthdrlen = icsk->icsk_ext_hdr_len;
208 struct sockaddr_in sin;
209
210 SOCK_DEBUG(sk, "connect: ipv4 mapped\n");
211
212 if (__ipv6_only_sock(sk))
213 return -ENETUNREACH;
214
215 sin.sin_family = AF_INET;
216 sin.sin_port = usin->sin6_port;
217 sin.sin_addr.s_addr = usin->sin6_addr.s6_addr32[3];
218
219 icsk->icsk_af_ops = &ipv6_mapped;
220 sk->sk_backlog_rcv = tcp_v4_do_rcv;
221#ifdef CONFIG_TCP_MD5SIG
222 tp->af_specific = &tcp_sock_ipv6_mapped_specific;
223#endif
224
225 err = tcp_v4_connect(sk, (struct sockaddr *)&sin, sizeof(sin));
226
227 if (err) {
228 icsk->icsk_ext_hdr_len = exthdrlen;
229 icsk->icsk_af_ops = &ipv6_specific;
230 sk->sk_backlog_rcv = tcp_v6_do_rcv;
231#ifdef CONFIG_TCP_MD5SIG
232 tp->af_specific = &tcp_sock_ipv6_specific;
233#endif
234 goto failure;
235 } else {
236 ipv6_addr_set_v4mapped(inet->inet_saddr, &np->saddr);
237 ipv6_addr_set_v4mapped(inet->inet_rcv_saddr,
238 &np->rcv_saddr);
239 }
240
241 return err;
242 }
243
244 if (!ipv6_addr_any(&np->rcv_saddr))
245 saddr = &np->rcv_saddr;
246
247 fl6.flowi6_proto = IPPROTO_TCP;
248 fl6.daddr = np->daddr;
249 fl6.saddr = saddr ? *saddr : np->saddr;
250 fl6.flowi6_oif = sk->sk_bound_dev_if;
251 fl6.flowi6_mark = sk->sk_mark;
252 fl6.fl6_dport = usin->sin6_port;
253 fl6.fl6_sport = inet->inet_sport;
254
255 final_p = fl6_update_dst(&fl6, np->opt, &final);
256
257 security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));
258
259 dst = ip6_dst_lookup_flow(sk, &fl6, final_p, true);
260 if (IS_ERR(dst)) {
261 err = PTR_ERR(dst);
262 goto failure;
263 }
264
265 if (saddr == NULL) {
266 saddr = &fl6.saddr;
267 np->rcv_saddr = *saddr;
268 }
269
270 /* set the source address */
271 np->saddr = *saddr;
272 inet->inet_rcv_saddr = LOOPBACK4_IPV6;
273
274 sk->sk_gso_type = SKB_GSO_TCPV6;
275 __ip6_dst_store(sk, dst, NULL, NULL);
276
277 rt = (struct rt6_info *) dst;
278 if (tcp_death_row.sysctl_tw_recycle &&
279 !tp->rx_opt.ts_recent_stamp &&
280 ipv6_addr_equal(&rt->rt6i_dst.addr, &np->daddr)) {
281 struct inet_peer *peer = rt6_get_peer(rt);
282 /*
283 * VJ's idea. We save last timestamp seen from
284 * the destination in peer table, when entering state
285 * TIME-WAIT * and initialize rx_opt.ts_recent from it,
286 * when trying new connection.
287 */
288 if (peer) {
289 inet_peer_refcheck(peer);
290 if ((u32)get_seconds() - peer->tcp_ts_stamp <= TCP_PAWS_MSL) {
291 tp->rx_opt.ts_recent_stamp = peer->tcp_ts_stamp;
292 tp->rx_opt.ts_recent = peer->tcp_ts;
293 }
294 }
295 }
296
297 icsk->icsk_ext_hdr_len = 0;
298 if (np->opt)
299 icsk->icsk_ext_hdr_len = (np->opt->opt_flen +
300 np->opt->opt_nflen);
301
302 tp->rx_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);
303
304 inet->inet_dport = usin->sin6_port;
305
306 tcp_set_state(sk, TCP_SYN_SENT);
307 err = inet6_hash_connect(&tcp_death_row, sk);
308 if (err)
309 goto late_failure;
310
311 if (!tp->write_seq)
312 tp->write_seq = secure_tcpv6_sequence_number(np->saddr.s6_addr32,
313 np->daddr.s6_addr32,
314 inet->inet_sport,
315 inet->inet_dport);
316
317 err = tcp_connect(sk);
318 if (err)
319 goto late_failure;
320
321 return 0;
322
323late_failure:
324 tcp_set_state(sk, TCP_CLOSE);
325 __sk_dst_reset(sk);
326failure:
327 inet->inet_dport = 0;
328 sk->sk_route_caps = 0;
329 return err;
330}
331
332static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
333 u8 type, u8 code, int offset, __be32 info)
334{
335 const struct ipv6hdr *hdr = (const struct ipv6hdr*)skb->data;
336 const struct tcphdr *th = (struct tcphdr *)(skb->data+offset);
337 struct ipv6_pinfo *np;
338 struct sock *sk;
339 int err;
340 struct tcp_sock *tp;
341 __u32 seq;
342 struct net *net = dev_net(skb->dev);
343
344 sk = inet6_lookup(net, &tcp_hashinfo, &hdr->daddr,
345 th->dest, &hdr->saddr, th->source, skb->dev->ifindex);
346
347 if (sk == NULL) {
348 ICMP6_INC_STATS_BH(net, __in6_dev_get(skb->dev),
349 ICMP6_MIB_INERRORS);
350 return;
351 }
352
353 if (sk->sk_state == TCP_TIME_WAIT) {
354 inet_twsk_put(inet_twsk(sk));
355 return;
356 }
357
358 bh_lock_sock(sk);
359 if (sock_owned_by_user(sk))
360 NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS);
361
362 if (sk->sk_state == TCP_CLOSE)
363 goto out;
364
365 if (ipv6_hdr(skb)->hop_limit < inet6_sk(sk)->min_hopcount) {
366 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
367 goto out;
368 }
369
370 tp = tcp_sk(sk);
371 seq = ntohl(th->seq);
372 if (sk->sk_state != TCP_LISTEN &&
373 !between(seq, tp->snd_una, tp->snd_nxt)) {
374 NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
375 goto out;
376 }
377
378 np = inet6_sk(sk);
379
380 if (type == ICMPV6_PKT_TOOBIG) {
381 struct dst_entry *dst;
382
383 if (sock_owned_by_user(sk))
384 goto out;
385 if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))
386 goto out;
387
388 /* icmp should have updated the destination cache entry */
389 dst = __sk_dst_check(sk, np->dst_cookie);
390
391 if (dst == NULL) {
392 struct inet_sock *inet = inet_sk(sk);
393 struct flowi6 fl6;
394
395 /* BUGGG_FUTURE: Again, it is not clear how
396 to handle rthdr case. Ignore this complexity
397 for now.
398 */
399 memset(&fl6, 0, sizeof(fl6));
400 fl6.flowi6_proto = IPPROTO_TCP;
401 fl6.daddr = np->daddr;
402 fl6.saddr = np->saddr;
403 fl6.flowi6_oif = sk->sk_bound_dev_if;
404 fl6.flowi6_mark = sk->sk_mark;
405 fl6.fl6_dport = inet->inet_dport;
406 fl6.fl6_sport = inet->inet_sport;
407 security_skb_classify_flow(skb, flowi6_to_flowi(&fl6));
408
409 dst = ip6_dst_lookup_flow(sk, &fl6, NULL, false);
410 if (IS_ERR(dst)) {
411 sk->sk_err_soft = -PTR_ERR(dst);
412 goto out;
413 }
414
415 } else
416 dst_hold(dst);
417
418 if (inet_csk(sk)->icsk_pmtu_cookie > dst_mtu(dst)) {
419 tcp_sync_mss(sk, dst_mtu(dst));
420 tcp_simple_retransmit(sk);
421 } /* else let the usual retransmit timer handle it */
422 dst_release(dst);
423 goto out;
424 }
425
426 icmpv6_err_convert(type, code, &err);
427
428 /* Might be for an request_sock */
429 switch (sk->sk_state) {
430 struct request_sock *req, **prev;
431 case TCP_LISTEN:
432 if (sock_owned_by_user(sk))
433 goto out;
434
435 req = inet6_csk_search_req(sk, &prev, th->dest, &hdr->daddr,
436 &hdr->saddr, inet6_iif(skb));
437 if (!req)
438 goto out;
439
440 /* ICMPs are not backlogged, hence we cannot get
441 * an established socket here.
442 */
443 WARN_ON(req->sk != NULL);
444
445 if (seq != tcp_rsk(req)->snt_isn) {
446 NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
447 goto out;
448 }
449
450 inet_csk_reqsk_queue_drop(sk, req, prev);
451 goto out;
452
453 case TCP_SYN_SENT:
454 case TCP_SYN_RECV: /* Cannot happen.
455 It can, it SYNs are crossed. --ANK */
456 if (!sock_owned_by_user(sk)) {
457 sk->sk_err = err;
458 sk->sk_error_report(sk); /* Wake people up to see the error (see connect in sock.c) */
459
460 tcp_done(sk);
461 } else
462 sk->sk_err_soft = err;
463 goto out;
464 }
465
466 if (!sock_owned_by_user(sk) && np->recverr) {
467 sk->sk_err = err;
468 sk->sk_error_report(sk);
469 } else
470 sk->sk_err_soft = err;
471
472out:
473 bh_unlock_sock(sk);
474 sock_put(sk);
475}
476
477
478static int tcp_v6_send_synack(struct sock *sk, struct request_sock *req,
479 struct request_values *rvp,
480 u16 queue_mapping)
481{
482 struct inet6_request_sock *treq = inet6_rsk(req);
483 struct ipv6_pinfo *np = inet6_sk(sk);
484 struct sk_buff * skb;
485 struct ipv6_txoptions *opt = NULL;
486 struct in6_addr * final_p, final;
487 struct flowi6 fl6;
488 struct dst_entry *dst;
489 int err;
490
491 memset(&fl6, 0, sizeof(fl6));
492 fl6.flowi6_proto = IPPROTO_TCP;
493 fl6.daddr = treq->rmt_addr;
494 fl6.saddr = treq->loc_addr;
495 fl6.flowlabel = 0;
496 fl6.flowi6_oif = treq->iif;
497 fl6.flowi6_mark = sk->sk_mark;
498 fl6.fl6_dport = inet_rsk(req)->rmt_port;
499 fl6.fl6_sport = inet_rsk(req)->loc_port;
500 security_req_classify_flow(req, flowi6_to_flowi(&fl6));
501
502 opt = np->opt;
503 final_p = fl6_update_dst(&fl6, opt, &final);
504
505 dst = ip6_dst_lookup_flow(sk, &fl6, final_p, false);
506 if (IS_ERR(dst)) {
507 err = PTR_ERR(dst);
508 dst = NULL;
509 goto done;
510 }
511 skb = tcp_make_synack(sk, dst, req, rvp);
512 err = -ENOMEM;
513 if (skb) {
514 __tcp_v6_send_check(skb, &treq->loc_addr, &treq->rmt_addr);
515
516 fl6.daddr = treq->rmt_addr;
517 skb_set_queue_mapping(skb, queue_mapping);
518 err = ip6_xmit(sk, skb, &fl6, opt, np->tclass);
519 err = net_xmit_eval(err);
520 }
521
522done:
523 if (opt && opt != np->opt)
524 sock_kfree_s(sk, opt, opt->tot_len);
525 dst_release(dst);
526 return err;
527}
528
529static int tcp_v6_rtx_synack(struct sock *sk, struct request_sock *req,
530 struct request_values *rvp)
531{
532 TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_RETRANSSEGS);
533 return tcp_v6_send_synack(sk, req, rvp, 0);
534}
535
536static void tcp_v6_reqsk_destructor(struct request_sock *req)
537{
538 kfree_skb(inet6_rsk(req)->pktopts);
539}
540
541#ifdef CONFIG_TCP_MD5SIG
542static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk,
543 const struct in6_addr *addr)
544{
545 return tcp_md5_do_lookup(sk, (union tcp_md5_addr *)addr, AF_INET6);
546}
547
548static struct tcp_md5sig_key *tcp_v6_md5_lookup(struct sock *sk,
549 struct sock *addr_sk)
550{
551 return tcp_v6_md5_do_lookup(sk, &inet6_sk(addr_sk)->daddr);
552}
553
554static struct tcp_md5sig_key *tcp_v6_reqsk_md5_lookup(struct sock *sk,
555 struct request_sock *req)
556{
557 return tcp_v6_md5_do_lookup(sk, &inet6_rsk(req)->rmt_addr);
558}
559
560static int tcp_v6_parse_md5_keys (struct sock *sk, char __user *optval,
561 int optlen)
562{
563 struct tcp_md5sig cmd;
564 struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&cmd.tcpm_addr;
565
566 if (optlen < sizeof(cmd))
567 return -EINVAL;
568
569 if (copy_from_user(&cmd, optval, sizeof(cmd)))
570 return -EFAULT;
571
572 if (sin6->sin6_family != AF_INET6)
573 return -EINVAL;
574
575 if (!cmd.tcpm_keylen) {
576 if (ipv6_addr_v4mapped(&sin6->sin6_addr))
577 return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin6->sin6_addr.s6_addr32[3],
578 AF_INET);
579 return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin6->sin6_addr,
580 AF_INET6);
581 }
582
583 if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
584 return -EINVAL;
585
586 if (ipv6_addr_v4mapped(&sin6->sin6_addr))
587 return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin6->sin6_addr.s6_addr32[3],
588 AF_INET, cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL);
589
590 return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin6->sin6_addr,
591 AF_INET6, cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL);
592}
593
594static int tcp_v6_md5_hash_pseudoheader(struct tcp_md5sig_pool *hp,
595 const struct in6_addr *daddr,
596 const struct in6_addr *saddr, int nbytes)
597{
598 struct tcp6_pseudohdr *bp;
599 struct scatterlist sg;
600
601 bp = &hp->md5_blk.ip6;
602 /* 1. TCP pseudo-header (RFC2460) */
603 bp->saddr = *saddr;
604 bp->daddr = *daddr;
605 bp->protocol = cpu_to_be32(IPPROTO_TCP);
606 bp->len = cpu_to_be32(nbytes);
607
608 sg_init_one(&sg, bp, sizeof(*bp));
609 return crypto_hash_update(&hp->md5_desc, &sg, sizeof(*bp));
610}
611
612static int tcp_v6_md5_hash_hdr(char *md5_hash, struct tcp_md5sig_key *key,
613 const struct in6_addr *daddr, struct in6_addr *saddr,
614 const struct tcphdr *th)
615{
616 struct tcp_md5sig_pool *hp;
617 struct hash_desc *desc;
618
619 hp = tcp_get_md5sig_pool();
620 if (!hp)
621 goto clear_hash_noput;
622 desc = &hp->md5_desc;
623
624 if (crypto_hash_init(desc))
625 goto clear_hash;
626 if (tcp_v6_md5_hash_pseudoheader(hp, daddr, saddr, th->doff << 2))
627 goto clear_hash;
628 if (tcp_md5_hash_header(hp, th))
629 goto clear_hash;
630 if (tcp_md5_hash_key(hp, key))
631 goto clear_hash;
632 if (crypto_hash_final(desc, md5_hash))
633 goto clear_hash;
634
635 tcp_put_md5sig_pool();
636 return 0;
637
638clear_hash:
639 tcp_put_md5sig_pool();
640clear_hash_noput:
641 memset(md5_hash, 0, 16);
642 return 1;
643}
644
645static int tcp_v6_md5_hash_skb(char *md5_hash, struct tcp_md5sig_key *key,
646 const struct sock *sk,
647 const struct request_sock *req,
648 const struct sk_buff *skb)
649{
650 const struct in6_addr *saddr, *daddr;
651 struct tcp_md5sig_pool *hp;
652 struct hash_desc *desc;
653 const struct tcphdr *th = tcp_hdr(skb);
654
655 if (sk) {
656 saddr = &inet6_sk(sk)->saddr;
657 daddr = &inet6_sk(sk)->daddr;
658 } else if (req) {
659 saddr = &inet6_rsk(req)->loc_addr;
660 daddr = &inet6_rsk(req)->rmt_addr;
661 } else {
662 const struct ipv6hdr *ip6h = ipv6_hdr(skb);
663 saddr = &ip6h->saddr;
664 daddr = &ip6h->daddr;
665 }
666
667 hp = tcp_get_md5sig_pool();
668 if (!hp)
669 goto clear_hash_noput;
670 desc = &hp->md5_desc;
671
672 if (crypto_hash_init(desc))
673 goto clear_hash;
674
675 if (tcp_v6_md5_hash_pseudoheader(hp, daddr, saddr, skb->len))
676 goto clear_hash;
677 if (tcp_md5_hash_header(hp, th))
678 goto clear_hash;
679 if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2))
680 goto clear_hash;
681 if (tcp_md5_hash_key(hp, key))
682 goto clear_hash;
683 if (crypto_hash_final(desc, md5_hash))
684 goto clear_hash;
685
686 tcp_put_md5sig_pool();
687 return 0;
688
689clear_hash:
690 tcp_put_md5sig_pool();
691clear_hash_noput:
692 memset(md5_hash, 0, 16);
693 return 1;
694}
695
696static int tcp_v6_inbound_md5_hash(struct sock *sk, const struct sk_buff *skb)
697{
698 const __u8 *hash_location = NULL;
699 struct tcp_md5sig_key *hash_expected;
700 const struct ipv6hdr *ip6h = ipv6_hdr(skb);
701 const struct tcphdr *th = tcp_hdr(skb);
702 int genhash;
703 u8 newhash[16];
704
705 hash_expected = tcp_v6_md5_do_lookup(sk, &ip6h->saddr);
706 hash_location = tcp_parse_md5sig_option(th);
707
708 /* We've parsed the options - do we have a hash? */
709 if (!hash_expected && !hash_location)
710 return 0;
711
712 if (hash_expected && !hash_location) {
713 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
714 return 1;
715 }
716
717 if (!hash_expected && hash_location) {
718 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
719 return 1;
720 }
721
722 /* check the signature */
723 genhash = tcp_v6_md5_hash_skb(newhash,
724 hash_expected,
725 NULL, NULL, skb);
726
727 if (genhash || memcmp(hash_location, newhash, 16) != 0) {
728 net_info_ratelimited("MD5 Hash %s for [%pI6c]:%u->[%pI6c]:%u\n",
729 genhash ? "failed" : "mismatch",
730 &ip6h->saddr, ntohs(th->source),
731 &ip6h->daddr, ntohs(th->dest));
732 return 1;
733 }
734 return 0;
735}
736#endif
737
738struct request_sock_ops tcp6_request_sock_ops __read_mostly = {
739 .family = AF_INET6,
740 .obj_size = sizeof(struct tcp6_request_sock),
741 .rtx_syn_ack = tcp_v6_rtx_synack,
742 .send_ack = tcp_v6_reqsk_send_ack,
743 .destructor = tcp_v6_reqsk_destructor,
744 .send_reset = tcp_v6_send_reset,
745 .syn_ack_timeout = tcp_syn_ack_timeout,
746};
747
748#ifdef CONFIG_TCP_MD5SIG
749static const struct tcp_request_sock_ops tcp_request_sock_ipv6_ops = {
750 .md5_lookup = tcp_v6_reqsk_md5_lookup,
751 .calc_md5_hash = tcp_v6_md5_hash_skb,
752};
753#endif
754
755static void __tcp_v6_send_check(struct sk_buff *skb,
756 const struct in6_addr *saddr, const struct in6_addr *daddr)
757{
758 struct tcphdr *th = tcp_hdr(skb);
759
760 if (skb->ip_summed == CHECKSUM_PARTIAL) {
761 th->check = ~tcp_v6_check(skb->len, saddr, daddr, 0);
762 skb->csum_start = skb_transport_header(skb) - skb->head;
763 skb->csum_offset = offsetof(struct tcphdr, check);
764 } else {
765 th->check = tcp_v6_check(skb->len, saddr, daddr,
766 csum_partial(th, th->doff << 2,
767 skb->csum));
768 }
769}
770
771static void tcp_v6_send_check(struct sock *sk, struct sk_buff *skb)
772{
773 struct ipv6_pinfo *np = inet6_sk(sk);
774
775 __tcp_v6_send_check(skb, &np->saddr, &np->daddr);
776}
777
778static int tcp_v6_gso_send_check(struct sk_buff *skb)
779{
780 const struct ipv6hdr *ipv6h;
781 struct tcphdr *th;
782
783 if (!pskb_may_pull(skb, sizeof(*th)))
784 return -EINVAL;
785
786 ipv6h = ipv6_hdr(skb);
787 th = tcp_hdr(skb);
788
789 th->check = 0;
790 skb->ip_summed = CHECKSUM_PARTIAL;
791 __tcp_v6_send_check(skb, &ipv6h->saddr, &ipv6h->daddr);
792 return 0;
793}
794
795static struct sk_buff **tcp6_gro_receive(struct sk_buff **head,
796 struct sk_buff *skb)
797{
798 const struct ipv6hdr *iph = skb_gro_network_header(skb);
799
800 switch (skb->ip_summed) {
801 case CHECKSUM_COMPLETE:
802 if (!tcp_v6_check(skb_gro_len(skb), &iph->saddr, &iph->daddr,
803 skb->csum)) {
804 skb->ip_summed = CHECKSUM_UNNECESSARY;
805 break;
806 }
807
808 /* fall through */
809 case CHECKSUM_NONE:
810 NAPI_GRO_CB(skb)->flush = 1;
811 return NULL;
812 }
813
814 return tcp_gro_receive(head, skb);
815}
816
817static int tcp6_gro_complete(struct sk_buff *skb)
818{
819 const struct ipv6hdr *iph = ipv6_hdr(skb);
820 struct tcphdr *th = tcp_hdr(skb);
821
822 th->check = ~tcp_v6_check(skb->len - skb_transport_offset(skb),
823 &iph->saddr, &iph->daddr, 0);
824 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
825
826 return tcp_gro_complete(skb);
827}
828
829static void tcp_v6_send_response(struct sk_buff *skb, u32 seq, u32 ack, u32 win,
830 u32 ts, struct tcp_md5sig_key *key, int rst, u8 tclass)
831{
832 const struct tcphdr *th = tcp_hdr(skb);
833 struct tcphdr *t1;
834 struct sk_buff *buff;
835 struct flowi6 fl6;
836 struct net *net = dev_net(skb_dst(skb)->dev);
837 struct sock *ctl_sk = net->ipv6.tcp_sk;
838 unsigned int tot_len = sizeof(struct tcphdr);
839 struct dst_entry *dst;
840 __be32 *topt;
841
842 if (ts)
843 tot_len += TCPOLEN_TSTAMP_ALIGNED;
844#ifdef CONFIG_TCP_MD5SIG
845 if (key)
846 tot_len += TCPOLEN_MD5SIG_ALIGNED;
847#endif
848
849 buff = alloc_skb(MAX_HEADER + sizeof(struct ipv6hdr) + tot_len,
850 GFP_ATOMIC);
851 if (buff == NULL)
852 return;
853
854 skb_reserve(buff, MAX_HEADER + sizeof(struct ipv6hdr) + tot_len);
855
856 t1 = (struct tcphdr *) skb_push(buff, tot_len);
857 skb_reset_transport_header(buff);
858
859 /* Swap the send and the receive. */
860 memset(t1, 0, sizeof(*t1));
861 t1->dest = th->source;
862 t1->source = th->dest;
863 t1->doff = tot_len / 4;
864 t1->seq = htonl(seq);
865 t1->ack_seq = htonl(ack);
866 t1->ack = !rst || !th->ack;
867 t1->rst = rst;
868 t1->window = htons(win);
869
870 topt = (__be32 *)(t1 + 1);
871
872 if (ts) {
873 *topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
874 (TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP);
875 *topt++ = htonl(tcp_time_stamp);
876 *topt++ = htonl(ts);
877 }
878
879#ifdef CONFIG_TCP_MD5SIG
880 if (key) {
881 *topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
882 (TCPOPT_MD5SIG << 8) | TCPOLEN_MD5SIG);
883 tcp_v6_md5_hash_hdr((__u8 *)topt, key,
884 &ipv6_hdr(skb)->saddr,
885 &ipv6_hdr(skb)->daddr, t1);
886 }
887#endif
888
889 memset(&fl6, 0, sizeof(fl6));
890 fl6.daddr = ipv6_hdr(skb)->saddr;
891 fl6.saddr = ipv6_hdr(skb)->daddr;
892
893 buff->ip_summed = CHECKSUM_PARTIAL;
894 buff->csum = 0;
895
896 __tcp_v6_send_check(buff, &fl6.saddr, &fl6.daddr);
897
898 fl6.flowi6_proto = IPPROTO_TCP;
899 fl6.flowi6_oif = inet6_iif(skb);
900 fl6.fl6_dport = t1->dest;
901 fl6.fl6_sport = t1->source;
902 security_skb_classify_flow(skb, flowi6_to_flowi(&fl6));
903
904 /* Pass a socket to ip6_dst_lookup either it is for RST
905 * Underlying function will use this to retrieve the network
906 * namespace
907 */
908 dst = ip6_dst_lookup_flow(ctl_sk, &fl6, NULL, false);
909 if (!IS_ERR(dst)) {
910 skb_dst_set(buff, dst);
911 ip6_xmit(ctl_sk, buff, &fl6, NULL, tclass);
912 TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
913 if (rst)
914 TCP_INC_STATS_BH(net, TCP_MIB_OUTRSTS);
915 return;
916 }
917
918 kfree_skb(buff);
919}
920
921static void tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb)
922{
923 const struct tcphdr *th = tcp_hdr(skb);
924 u32 seq = 0, ack_seq = 0;
925 struct tcp_md5sig_key *key = NULL;
926#ifdef CONFIG_TCP_MD5SIG
927 const __u8 *hash_location = NULL;
928 struct ipv6hdr *ipv6h = ipv6_hdr(skb);
929 unsigned char newhash[16];
930 int genhash;
931 struct sock *sk1 = NULL;
932#endif
933
934 if (th->rst)
935 return;
936
937 if (!ipv6_unicast_destination(skb))
938 return;
939
940#ifdef CONFIG_TCP_MD5SIG
941 hash_location = tcp_parse_md5sig_option(th);
942 if (!sk && hash_location) {
943 /*
944 * active side is lost. Try to find listening socket through
945 * source port, and then find md5 key through listening socket.
946 * we are not loose security here:
947 * Incoming packet is checked with md5 hash with finding key,
948 * no RST generated if md5 hash doesn't match.
949 */
950 sk1 = inet6_lookup_listener(dev_net(skb_dst(skb)->dev),
951 &tcp_hashinfo, &ipv6h->daddr,
952 ntohs(th->source), inet6_iif(skb));
953 if (!sk1)
954 return;
955
956 rcu_read_lock();
957 key = tcp_v6_md5_do_lookup(sk1, &ipv6h->saddr);
958 if (!key)
959 goto release_sk1;
960
961 genhash = tcp_v6_md5_hash_skb(newhash, key, NULL, NULL, skb);
962 if (genhash || memcmp(hash_location, newhash, 16) != 0)
963 goto release_sk1;
964 } else {
965 key = sk ? tcp_v6_md5_do_lookup(sk, &ipv6h->saddr) : NULL;
966 }
967#endif
968
969 if (th->ack)
970 seq = ntohl(th->ack_seq);
971 else
972 ack_seq = ntohl(th->seq) + th->syn + th->fin + skb->len -
973 (th->doff << 2);
974
975 tcp_v6_send_response(skb, seq, ack_seq, 0, 0, key, 1, 0);
976
977#ifdef CONFIG_TCP_MD5SIG
978release_sk1:
979 if (sk1) {
980 rcu_read_unlock();
981 sock_put(sk1);
982 }
983#endif
984}
985
986static void tcp_v6_send_ack(struct sk_buff *skb, u32 seq, u32 ack, u32 win, u32 ts,
987 struct tcp_md5sig_key *key, u8 tclass)
988{
989 tcp_v6_send_response(skb, seq, ack, win, ts, key, 0, tclass);
990}
991
992static void tcp_v6_timewait_ack(struct sock *sk, struct sk_buff *skb)
993{
994 struct inet_timewait_sock *tw = inet_twsk(sk);
995 struct tcp_timewait_sock *tcptw = tcp_twsk(sk);
996
997 tcp_v6_send_ack(skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
998 tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
999 tcptw->tw_ts_recent, tcp_twsk_md5_key(tcptw),
1000 tw->tw_tclass);
1001
1002 inet_twsk_put(tw);
1003}
1004
1005static void tcp_v6_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
1006 struct request_sock *req)
1007{
1008 tcp_v6_send_ack(skb, tcp_rsk(req)->snt_isn + 1, tcp_rsk(req)->rcv_isn + 1, req->rcv_wnd, req->ts_recent,
1009 tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->daddr), 0);
1010}
1011
1012
1013static struct sock *tcp_v6_hnd_req(struct sock *sk,struct sk_buff *skb)
1014{
1015 struct request_sock *req, **prev;
1016 const struct tcphdr *th = tcp_hdr(skb);
1017 struct sock *nsk;
1018
1019 /* Find possible connection requests. */
1020 req = inet6_csk_search_req(sk, &prev, th->source,
1021 &ipv6_hdr(skb)->saddr,
1022 &ipv6_hdr(skb)->daddr, inet6_iif(skb));
1023 if (req)
1024 return tcp_check_req(sk, skb, req, prev);
1025
1026 nsk = __inet6_lookup_established(sock_net(sk), &tcp_hashinfo,
1027 &ipv6_hdr(skb)->saddr, th->source,
1028 &ipv6_hdr(skb)->daddr, ntohs(th->dest), inet6_iif(skb));
1029
1030 if (nsk) {
1031 if (nsk->sk_state != TCP_TIME_WAIT) {
1032 bh_lock_sock(nsk);
1033 return nsk;
1034 }
1035 inet_twsk_put(inet_twsk(nsk));
1036 return NULL;
1037 }
1038
1039#ifdef CONFIG_SYN_COOKIES
1040 if (!th->syn)
1041 sk = cookie_v6_check(sk, skb);
1042#endif
1043 return sk;
1044}
1045
1046/* FIXME: this is substantially similar to the ipv4 code.
1047 * Can some kind of merge be done? -- erics
1048 */
1049static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
1050{
1051 struct tcp_extend_values tmp_ext;
1052 struct tcp_options_received tmp_opt;
1053 const u8 *hash_location;
1054 struct request_sock *req;
1055 struct inet6_request_sock *treq;
1056 struct ipv6_pinfo *np = inet6_sk(sk);
1057 struct tcp_sock *tp = tcp_sk(sk);
1058 __u32 isn = TCP_SKB_CB(skb)->when;
1059 struct dst_entry *dst = NULL;
1060 bool want_cookie = false;
1061
1062 if (skb->protocol == htons(ETH_P_IP))
1063 return tcp_v4_conn_request(sk, skb);
1064
1065 if (!ipv6_unicast_destination(skb))
1066 goto drop;
1067
1068 if (inet_csk_reqsk_queue_is_full(sk) && !isn) {
1069 want_cookie = tcp_syn_flood_action(sk, skb, "TCPv6");
1070 if (!want_cookie)
1071 goto drop;
1072 }
1073
1074 if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1)
1075 goto drop;
1076
1077 req = inet6_reqsk_alloc(&tcp6_request_sock_ops);
1078 if (req == NULL)
1079 goto drop;
1080
1081#ifdef CONFIG_TCP_MD5SIG
1082 tcp_rsk(req)->af_specific = &tcp_request_sock_ipv6_ops;
1083#endif
1084
1085 tcp_clear_options(&tmp_opt);
1086 tmp_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);
1087 tmp_opt.user_mss = tp->rx_opt.user_mss;
1088 tcp_parse_options(skb, &tmp_opt, &hash_location, 0);
1089
1090 if (tmp_opt.cookie_plus > 0 &&
1091 tmp_opt.saw_tstamp &&
1092 !tp->rx_opt.cookie_out_never &&
1093 (sysctl_tcp_cookie_size > 0 ||
1094 (tp->cookie_values != NULL &&
1095 tp->cookie_values->cookie_desired > 0))) {
1096 u8 *c;
1097 u32 *d;
1098 u32 *mess = &tmp_ext.cookie_bakery[COOKIE_DIGEST_WORDS];
1099 int l = tmp_opt.cookie_plus - TCPOLEN_COOKIE_BASE;
1100
1101 if (tcp_cookie_generator(&tmp_ext.cookie_bakery[0]) != 0)
1102 goto drop_and_free;
1103
1104 /* Secret recipe starts with IP addresses */
1105 d = (__force u32 *)&ipv6_hdr(skb)->daddr.s6_addr32[0];
1106 *mess++ ^= *d++;
1107 *mess++ ^= *d++;
1108 *mess++ ^= *d++;
1109 *mess++ ^= *d++;
1110 d = (__force u32 *)&ipv6_hdr(skb)->saddr.s6_addr32[0];
1111 *mess++ ^= *d++;
1112 *mess++ ^= *d++;
1113 *mess++ ^= *d++;
1114 *mess++ ^= *d++;
1115
1116 /* plus variable length Initiator Cookie */
1117 c = (u8 *)mess;
1118 while (l-- > 0)
1119 *c++ ^= *hash_location++;
1120
1121 want_cookie = false; /* not our kind of cookie */
1122 tmp_ext.cookie_out_never = 0; /* false */
1123 tmp_ext.cookie_plus = tmp_opt.cookie_plus;
1124 } else if (!tp->rx_opt.cookie_in_always) {
1125 /* redundant indications, but ensure initialization. */
1126 tmp_ext.cookie_out_never = 1; /* true */
1127 tmp_ext.cookie_plus = 0;
1128 } else {
1129 goto drop_and_free;
1130 }
1131 tmp_ext.cookie_in_always = tp->rx_opt.cookie_in_always;
1132
1133 if (want_cookie && !tmp_opt.saw_tstamp)
1134 tcp_clear_options(&tmp_opt);
1135
1136 tmp_opt.tstamp_ok = tmp_opt.saw_tstamp;
1137 tcp_openreq_init(req, &tmp_opt, skb);
1138
1139 treq = inet6_rsk(req);
1140 treq->rmt_addr = ipv6_hdr(skb)->saddr;
1141 treq->loc_addr = ipv6_hdr(skb)->daddr;
1142 if (!want_cookie || tmp_opt.tstamp_ok)
1143 TCP_ECN_create_request(req, skb);
1144
1145 treq->iif = sk->sk_bound_dev_if;
1146
1147 /* So that link locals have meaning */
1148 if (!sk->sk_bound_dev_if &&
1149 ipv6_addr_type(&treq->rmt_addr) & IPV6_ADDR_LINKLOCAL)
1150 treq->iif = inet6_iif(skb);
1151
1152 if (!isn) {
1153 struct inet_peer *peer = NULL;
1154
1155 if (ipv6_opt_accepted(sk, skb) ||
1156 np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo ||
1157 np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim) {
1158 atomic_inc(&skb->users);
1159 treq->pktopts = skb;
1160 }
1161
1162 if (want_cookie) {
1163 isn = cookie_v6_init_sequence(sk, skb, &req->mss);
1164 req->cookie_ts = tmp_opt.tstamp_ok;
1165 goto have_isn;
1166 }
1167
1168 /* VJ's idea. We save last timestamp seen
1169 * from the destination in peer table, when entering
1170 * state TIME-WAIT, and check against it before
1171 * accepting new connection request.
1172 *
1173 * If "isn" is not zero, this request hit alive
1174 * timewait bucket, so that all the necessary checks
1175 * are made in the function processing timewait state.
1176 */
1177 if (tmp_opt.saw_tstamp &&
1178 tcp_death_row.sysctl_tw_recycle &&
1179 (dst = inet6_csk_route_req(sk, req)) != NULL &&
1180 (peer = rt6_get_peer((struct rt6_info *)dst)) != NULL &&
1181 ipv6_addr_equal((struct in6_addr *)peer->daddr.addr.a6,
1182 &treq->rmt_addr)) {
1183 inet_peer_refcheck(peer);
1184 if ((u32)get_seconds() - peer->tcp_ts_stamp < TCP_PAWS_MSL &&
1185 (s32)(peer->tcp_ts - req->ts_recent) >
1186 TCP_PAWS_WINDOW) {
1187 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSPASSIVEREJECTED);
1188 goto drop_and_release;
1189 }
1190 }
1191 /* Kill the following clause, if you dislike this way. */
1192 else if (!sysctl_tcp_syncookies &&
1193 (sysctl_max_syn_backlog - inet_csk_reqsk_queue_len(sk) <
1194 (sysctl_max_syn_backlog >> 2)) &&
1195 (!peer || !peer->tcp_ts_stamp) &&
1196 (!dst || !dst_metric(dst, RTAX_RTT))) {
1197 /* Without syncookies last quarter of
1198 * backlog is filled with destinations,
1199 * proven to be alive.
1200 * It means that we continue to communicate
1201 * to destinations, already remembered
1202 * to the moment of synflood.
1203 */
1204 LIMIT_NETDEBUG(KERN_DEBUG "TCP: drop open request from %pI6/%u\n",
1205 &treq->rmt_addr, ntohs(tcp_hdr(skb)->source));
1206 goto drop_and_release;
1207 }
1208
1209 isn = tcp_v6_init_sequence(skb);
1210 }
1211have_isn:
1212 tcp_rsk(req)->snt_isn = isn;
1213 tcp_rsk(req)->snt_synack = tcp_time_stamp;
1214
1215 if (security_inet_conn_request(sk, skb, req))
1216 goto drop_and_release;
1217
1218 if (tcp_v6_send_synack(sk, req,
1219 (struct request_values *)&tmp_ext,
1220 skb_get_queue_mapping(skb)) ||
1221 want_cookie)
1222 goto drop_and_free;
1223
1224 inet6_csk_reqsk_queue_hash_add(sk, req, TCP_TIMEOUT_INIT);
1225 return 0;
1226
1227drop_and_release:
1228 dst_release(dst);
1229drop_and_free:
1230 reqsk_free(req);
1231drop:
1232 return 0; /* don't send reset */
1233}
1234
1235static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
1236 struct request_sock *req,
1237 struct dst_entry *dst)
1238{
1239 struct inet6_request_sock *treq;
1240 struct ipv6_pinfo *newnp, *np = inet6_sk(sk);
1241 struct tcp6_sock *newtcp6sk;
1242 struct inet_sock *newinet;
1243 struct tcp_sock *newtp;
1244 struct sock *newsk;
1245 struct ipv6_txoptions *opt;
1246#ifdef CONFIG_TCP_MD5SIG
1247 struct tcp_md5sig_key *key;
1248#endif
1249
1250 if (skb->protocol == htons(ETH_P_IP)) {
1251 /*
1252 * v6 mapped
1253 */
1254
1255 newsk = tcp_v4_syn_recv_sock(sk, skb, req, dst);
1256
1257 if (newsk == NULL)
1258 return NULL;
1259
1260 newtcp6sk = (struct tcp6_sock *)newsk;
1261 inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;
1262
1263 newinet = inet_sk(newsk);
1264 newnp = inet6_sk(newsk);
1265 newtp = tcp_sk(newsk);
1266
1267 memcpy(newnp, np, sizeof(struct ipv6_pinfo));
1268
1269 ipv6_addr_set_v4mapped(newinet->inet_daddr, &newnp->daddr);
1270
1271 ipv6_addr_set_v4mapped(newinet->inet_saddr, &newnp->saddr);
1272
1273 newnp->rcv_saddr = newnp->saddr;
1274
1275 inet_csk(newsk)->icsk_af_ops = &ipv6_mapped;
1276 newsk->sk_backlog_rcv = tcp_v4_do_rcv;
1277#ifdef CONFIG_TCP_MD5SIG
1278 newtp->af_specific = &tcp_sock_ipv6_mapped_specific;
1279#endif
1280
1281 newnp->ipv6_ac_list = NULL;
1282 newnp->ipv6_fl_list = NULL;
1283 newnp->pktoptions = NULL;
1284 newnp->opt = NULL;
1285 newnp->mcast_oif = inet6_iif(skb);
1286 newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;
1287 newnp->rcv_tclass = ipv6_tclass(ipv6_hdr(skb));
1288
1289 /*
1290 * No need to charge this sock to the relevant IPv6 refcnt debug socks count
1291 * here, tcp_create_openreq_child now does this for us, see the comment in
1292 * that function for the gory details. -acme
1293 */
1294
1295 /* It is tricky place. Until this moment IPv4 tcp
1296 worked with IPv6 icsk.icsk_af_ops.
1297 Sync it now.
1298 */
1299 tcp_sync_mss(newsk, inet_csk(newsk)->icsk_pmtu_cookie);
1300
1301 return newsk;
1302 }
1303
1304 treq = inet6_rsk(req);
1305 opt = np->opt;
1306
1307 if (sk_acceptq_is_full(sk))
1308 goto out_overflow;
1309
1310 if (!dst) {
1311 dst = inet6_csk_route_req(sk, req);
1312 if (!dst)
1313 goto out;
1314 }
1315
1316 newsk = tcp_create_openreq_child(sk, req, skb);
1317 if (newsk == NULL)
1318 goto out_nonewsk;
1319
1320 /*
1321 * No need to charge this sock to the relevant IPv6 refcnt debug socks
1322 * count here, tcp_create_openreq_child now does this for us, see the
1323 * comment in that function for the gory details. -acme
1324 */
1325
1326 newsk->sk_gso_type = SKB_GSO_TCPV6;
1327 __ip6_dst_store(newsk, dst, NULL, NULL);
1328
1329 newtcp6sk = (struct tcp6_sock *)newsk;
1330 inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;
1331
1332 newtp = tcp_sk(newsk);
1333 newinet = inet_sk(newsk);
1334 newnp = inet6_sk(newsk);
1335
1336 memcpy(newnp, np, sizeof(struct ipv6_pinfo));
1337
1338 newnp->daddr = treq->rmt_addr;
1339 newnp->saddr = treq->loc_addr;
1340 newnp->rcv_saddr = treq->loc_addr;
1341 newsk->sk_bound_dev_if = treq->iif;
1342
1343 /* Now IPv6 options...
1344
1345 First: no IPv4 options.
1346 */
1347 newinet->inet_opt = NULL;
1348 newnp->ipv6_ac_list = NULL;
1349 newnp->ipv6_fl_list = NULL;
1350
1351 /* Clone RX bits */
1352 newnp->rxopt.all = np->rxopt.all;
1353
1354 /* Clone pktoptions received with SYN */
1355 newnp->pktoptions = NULL;
1356 if (treq->pktopts != NULL) {
1357 newnp->pktoptions = skb_clone(treq->pktopts, GFP_ATOMIC);
1358 consume_skb(treq->pktopts);
1359 treq->pktopts = NULL;
1360 if (newnp->pktoptions)
1361 skb_set_owner_r(newnp->pktoptions, newsk);
1362 }
1363 newnp->opt = NULL;
1364 newnp->mcast_oif = inet6_iif(skb);
1365 newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;
1366 newnp->rcv_tclass = ipv6_tclass(ipv6_hdr(skb));
1367
1368 /* Clone native IPv6 options from listening socket (if any)
1369
1370 Yes, keeping reference count would be much more clever,
1371 but we make one more one thing there: reattach optmem
1372 to newsk.
1373 */
1374 if (opt) {
1375 newnp->opt = ipv6_dup_options(newsk, opt);
1376 if (opt != np->opt)
1377 sock_kfree_s(sk, opt, opt->tot_len);
1378 }
1379
1380 inet_csk(newsk)->icsk_ext_hdr_len = 0;
1381 if (newnp->opt)
1382 inet_csk(newsk)->icsk_ext_hdr_len = (newnp->opt->opt_nflen +
1383 newnp->opt->opt_flen);
1384
1385 tcp_mtup_init(newsk);
1386 tcp_sync_mss(newsk, dst_mtu(dst));
1387 newtp->advmss = dst_metric_advmss(dst);
1388 if (tcp_sk(sk)->rx_opt.user_mss &&
1389 tcp_sk(sk)->rx_opt.user_mss < newtp->advmss)
1390 newtp->advmss = tcp_sk(sk)->rx_opt.user_mss;
1391
1392 tcp_initialize_rcv_mss(newsk);
1393 if (tcp_rsk(req)->snt_synack)
1394 tcp_valid_rtt_meas(newsk,
1395 tcp_time_stamp - tcp_rsk(req)->snt_synack);
1396 newtp->total_retrans = req->retrans;
1397
1398 newinet->inet_daddr = newinet->inet_saddr = LOOPBACK4_IPV6;
1399 newinet->inet_rcv_saddr = LOOPBACK4_IPV6;
1400
1401#ifdef CONFIG_TCP_MD5SIG
1402 /* Copy over the MD5 key from the original socket */
1403 if ((key = tcp_v6_md5_do_lookup(sk, &newnp->daddr)) != NULL) {
1404 /* We're using one, so create a matching key
1405 * on the newsk structure. If we fail to get
1406 * memory, then we end up not copying the key
1407 * across. Shucks.
1408 */
1409 tcp_md5_do_add(newsk, (union tcp_md5_addr *)&newnp->daddr,
1410 AF_INET6, key->key, key->keylen, GFP_ATOMIC);
1411 }
1412#endif
1413
1414 if (__inet_inherit_port(sk, newsk) < 0) {
1415 sock_put(newsk);
1416 goto out;
1417 }
1418 __inet6_hash(newsk, NULL);
1419
1420 return newsk;
1421
1422out_overflow:
1423 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
1424out_nonewsk:
1425 if (opt && opt != np->opt)
1426 sock_kfree_s(sk, opt, opt->tot_len);
1427 dst_release(dst);
1428out:
1429 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
1430 return NULL;
1431}
1432
1433static __sum16 tcp_v6_checksum_init(struct sk_buff *skb)
1434{
1435 if (skb->ip_summed == CHECKSUM_COMPLETE) {
1436 if (!tcp_v6_check(skb->len, &ipv6_hdr(skb)->saddr,
1437 &ipv6_hdr(skb)->daddr, skb->csum)) {
1438 skb->ip_summed = CHECKSUM_UNNECESSARY;
1439 return 0;
1440 }
1441 }
1442
1443 skb->csum = ~csum_unfold(tcp_v6_check(skb->len,
1444 &ipv6_hdr(skb)->saddr,
1445 &ipv6_hdr(skb)->daddr, 0));
1446
1447 if (skb->len <= 76) {
1448 return __skb_checksum_complete(skb);
1449 }
1450 return 0;
1451}
1452
1453/* The socket must have it's spinlock held when we get
1454 * here.
1455 *
1456 * We have a potential double-lock case here, so even when
1457 * doing backlog processing we use the BH locking scheme.
1458 * This is because we cannot sleep with the original spinlock
1459 * held.
1460 */
1461static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
1462{
1463 struct ipv6_pinfo *np = inet6_sk(sk);
1464 struct tcp_sock *tp;
1465 struct sk_buff *opt_skb = NULL;
1466
1467 /* Imagine: socket is IPv6. IPv4 packet arrives,
1468 goes to IPv4 receive handler and backlogged.
1469 From backlog it always goes here. Kerboom...
1470 Fortunately, tcp_rcv_established and rcv_established
1471 handle them correctly, but it is not case with
1472 tcp_v6_hnd_req and tcp_v6_send_reset(). --ANK
1473 */
1474
1475 if (skb->protocol == htons(ETH_P_IP))
1476 return tcp_v4_do_rcv(sk, skb);
1477
1478#ifdef CONFIG_TCP_MD5SIG
1479 if (tcp_v6_inbound_md5_hash (sk, skb))
1480 goto discard;
1481#endif
1482
1483 if (sk_filter(sk, skb))
1484 goto discard;
1485
1486 /*
1487 * socket locking is here for SMP purposes as backlog rcv
1488 * is currently called with bh processing disabled.
1489 */
1490
1491 /* Do Stevens' IPV6_PKTOPTIONS.
1492
1493 Yes, guys, it is the only place in our code, where we
1494 may make it not affecting IPv4.
1495 The rest of code is protocol independent,
1496 and I do not like idea to uglify IPv4.
1497
1498 Actually, all the idea behind IPV6_PKTOPTIONS
1499 looks not very well thought. For now we latch
1500 options, received in the last packet, enqueued
1501 by tcp. Feel free to propose better solution.
1502 --ANK (980728)
1503 */
1504 if (np->rxopt.all)
1505 opt_skb = skb_clone(skb, GFP_ATOMIC);
1506
1507 if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
1508 sock_rps_save_rxhash(sk, skb);
1509 if (tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len))
1510 goto reset;
1511 if (opt_skb)
1512 goto ipv6_pktoptions;
1513 return 0;
1514 }
1515
1516 if (skb->len < tcp_hdrlen(skb) || tcp_checksum_complete(skb))
1517 goto csum_err;
1518
1519 if (sk->sk_state == TCP_LISTEN) {
1520 struct sock *nsk = tcp_v6_hnd_req(sk, skb);
1521 if (!nsk)
1522 goto discard;
1523
1524 /*
1525 * Queue it on the new socket if the new socket is active,
1526 * otherwise we just shortcircuit this and continue with
1527 * the new socket..
1528 */
1529 if(nsk != sk) {
1530 sock_rps_save_rxhash(nsk, skb);
1531 if (tcp_child_process(sk, nsk, skb))
1532 goto reset;
1533 if (opt_skb)
1534 __kfree_skb(opt_skb);
1535 return 0;
1536 }
1537 } else
1538 sock_rps_save_rxhash(sk, skb);
1539
1540 if (tcp_rcv_state_process(sk, skb, tcp_hdr(skb), skb->len))
1541 goto reset;
1542 if (opt_skb)
1543 goto ipv6_pktoptions;
1544 return 0;
1545
1546reset:
1547 tcp_v6_send_reset(sk, skb);
1548discard:
1549 if (opt_skb)
1550 __kfree_skb(opt_skb);
1551 kfree_skb(skb);
1552 return 0;
1553csum_err:
1554 TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS);
1555 goto discard;
1556
1557
1558ipv6_pktoptions:
1559 /* Do you ask, what is it?
1560
1561 1. skb was enqueued by tcp.
1562 2. skb is added to tail of read queue, rather than out of order.
1563 3. socket is not in passive state.
1564 4. Finally, it really contains options, which user wants to receive.
1565 */
1566 tp = tcp_sk(sk);
1567 if (TCP_SKB_CB(opt_skb)->end_seq == tp->rcv_nxt &&
1568 !((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))) {
1569 if (np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo)
1570 np->mcast_oif = inet6_iif(opt_skb);
1571 if (np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim)
1572 np->mcast_hops = ipv6_hdr(opt_skb)->hop_limit;
1573 if (np->rxopt.bits.rxtclass)
1574 np->rcv_tclass = ipv6_tclass(ipv6_hdr(skb));
1575 if (ipv6_opt_accepted(sk, opt_skb)) {
1576 skb_set_owner_r(opt_skb, sk);
1577 opt_skb = xchg(&np->pktoptions, opt_skb);
1578 } else {
1579 __kfree_skb(opt_skb);
1580 opt_skb = xchg(&np->pktoptions, NULL);
1581 }
1582 }
1583
1584 kfree_skb(opt_skb);
1585 return 0;
1586}
1587
1588static int tcp_v6_rcv(struct sk_buff *skb)
1589{
1590 const struct tcphdr *th;
1591 const struct ipv6hdr *hdr;
1592 struct sock *sk;
1593 int ret;
1594 struct net *net = dev_net(skb->dev);
1595
1596 if (skb->pkt_type != PACKET_HOST)
1597 goto discard_it;
1598
1599 /*
1600 * Count it even if it's bad.
1601 */
1602 TCP_INC_STATS_BH(net, TCP_MIB_INSEGS);
1603
1604 if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
1605 goto discard_it;
1606
1607 th = tcp_hdr(skb);
1608
1609 if (th->doff < sizeof(struct tcphdr)/4)
1610 goto bad_packet;
1611 if (!pskb_may_pull(skb, th->doff*4))
1612 goto discard_it;
1613
1614 if (!skb_csum_unnecessary(skb) && tcp_v6_checksum_init(skb))
1615 goto bad_packet;
1616
1617 th = tcp_hdr(skb);
1618 hdr = ipv6_hdr(skb);
1619 TCP_SKB_CB(skb)->seq = ntohl(th->seq);
1620 TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
1621 skb->len - th->doff*4);
1622 TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
1623 TCP_SKB_CB(skb)->when = 0;
1624 TCP_SKB_CB(skb)->ip_dsfield = ipv6_get_dsfield(hdr);
1625 TCP_SKB_CB(skb)->sacked = 0;
1626
1627 sk = __inet6_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
1628 if (!sk)
1629 goto no_tcp_socket;
1630
1631process:
1632 if (sk->sk_state == TCP_TIME_WAIT)
1633 goto do_time_wait;
1634
1635 if (hdr->hop_limit < inet6_sk(sk)->min_hopcount) {
1636 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
1637 goto discard_and_relse;
1638 }
1639
1640 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
1641 goto discard_and_relse;
1642
1643 if (sk_filter(sk, skb))
1644 goto discard_and_relse;
1645
1646 skb->dev = NULL;
1647
1648 bh_lock_sock_nested(sk);
1649 ret = 0;
1650 if (!sock_owned_by_user(sk)) {
1651#ifdef CONFIG_NET_DMA
1652 struct tcp_sock *tp = tcp_sk(sk);
1653 if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
1654 tp->ucopy.dma_chan = net_dma_find_channel();
1655 if (tp->ucopy.dma_chan)
1656 ret = tcp_v6_do_rcv(sk, skb);
1657 else
1658#endif
1659 {
1660 if (!tcp_prequeue(sk, skb))
1661 ret = tcp_v6_do_rcv(sk, skb);
1662 }
1663 } else if (unlikely(sk_add_backlog(sk, skb,
1664 sk->sk_rcvbuf + sk->sk_sndbuf))) {
1665 bh_unlock_sock(sk);
1666 NET_INC_STATS_BH(net, LINUX_MIB_TCPBACKLOGDROP);
1667 goto discard_and_relse;
1668 }
1669 bh_unlock_sock(sk);
1670
1671 sock_put(sk);
1672 return ret ? -1 : 0;
1673
1674no_tcp_socket:
1675 if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
1676 goto discard_it;
1677
1678 if (skb->len < (th->doff<<2) || tcp_checksum_complete(skb)) {
1679bad_packet:
1680 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
1681 } else {
1682 tcp_v6_send_reset(NULL, skb);
1683 }
1684
1685discard_it:
1686
1687 /*
1688 * Discard frame
1689 */
1690
1691 kfree_skb(skb);
1692 return 0;
1693
1694discard_and_relse:
1695 sock_put(sk);
1696 goto discard_it;
1697
1698do_time_wait:
1699 if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) {
1700 inet_twsk_put(inet_twsk(sk));
1701 goto discard_it;
1702 }
1703
1704 if (skb->len < (th->doff<<2) || tcp_checksum_complete(skb)) {
1705 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
1706 inet_twsk_put(inet_twsk(sk));
1707 goto discard_it;
1708 }
1709
1710 switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
1711 case TCP_TW_SYN:
1712 {
1713 struct sock *sk2;
1714
1715 sk2 = inet6_lookup_listener(dev_net(skb->dev), &tcp_hashinfo,
1716 &ipv6_hdr(skb)->daddr,
1717 ntohs(th->dest), inet6_iif(skb));
1718 if (sk2 != NULL) {
1719 struct inet_timewait_sock *tw = inet_twsk(sk);
1720 inet_twsk_deschedule(tw, &tcp_death_row);
1721 inet_twsk_put(tw);
1722 sk = sk2;
1723 goto process;
1724 }
1725 /* Fall through to ACK */
1726 }
1727 case TCP_TW_ACK:
1728 tcp_v6_timewait_ack(sk, skb);
1729 break;
1730 case TCP_TW_RST:
1731 goto no_tcp_socket;
1732 case TCP_TW_SUCCESS:;
1733 }
1734 goto discard_it;
1735}
1736
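/*
 * Return the inet_peer entry for the connection's destination.  When
 * the cached route still matches np->daddr the peer bound to the route
 * is reused and must not be released by the caller; otherwise a fresh,
 * refcounted lookup is done and *release_it tells the caller to drop
 * the reference when finished.
 */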
1737static struct inet_peer *tcp_v6_get_peer(struct sock *sk, bool *release_it)
1738{
1739 struct rt6_info *rt = (struct rt6_info *) __sk_dst_get(sk);
1740 struct ipv6_pinfo *np = inet6_sk(sk);
1741 struct inet_peer *peer;
1742
1743 if (!rt ||
1744 !ipv6_addr_equal(&np->daddr, &rt->rt6i_dst.addr)) {
1745 peer = inet_getpeer_v6(&np->daddr, 1);
1746 *release_it = true;
1747 } else {
1748 if (!rt->rt6i_peer)
1749 rt6_bind_peer(rt, 1);
1750 peer = rt->rt6i_peer;
1751 *release_it = false;
1752 }
1753
1754 return peer;
1755}
1756
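/*
 * TIME_WAIT variant of the peer lookup.  Timewait sockets that were
 * using v4-mapped addresses carry tw_family == AF_INET and are handed
 * to the IPv4 helper instead.
 */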
1757static void *tcp_v6_tw_get_peer(struct sock *sk)
1758{
1759 const struct inet6_timewait_sock *tw6 = inet6_twsk(sk);
1760 const struct inet_timewait_sock *tw = inet_twsk(sk);
1761
1762 if (tw->tw_family == AF_INET)
1763 return tcp_v4_tw_get_peer(sk);
1764
1765 return inet_getpeer_v6(&tw6->tw_v6_daddr, 1);
1766}
1767
1768static struct timewait_sock_ops tcp6_timewait_sock_ops = {
1769 .twsk_obj_size = sizeof(struct tcp6_timewait_sock),
1770 .twsk_unique = tcp_twsk_unique,
1771 .twsk_destructor= tcp_twsk_destructor,
1772 .twsk_getpeer = tcp_v6_tw_get_peer,
1773};
1774
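/*
 * AF-specific connection socket operations for native IPv6 TCP
 * sockets: IPv6 transmit, checksum and header rebuild, IPv6 connection
 * request handling, and the IPv6 flavours of the socket options.
 */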
1775static const struct inet_connection_sock_af_ops ipv6_specific = {
1776 .queue_xmit = inet6_csk_xmit,
1777 .send_check = tcp_v6_send_check,
1778 .rebuild_header = inet6_sk_rebuild_header,
1779 .conn_request = tcp_v6_conn_request,
1780 .syn_recv_sock = tcp_v6_syn_recv_sock,
1781 .get_peer = tcp_v6_get_peer,
1782 .net_header_len = sizeof(struct ipv6hdr),
1783 .net_frag_header_len = sizeof(struct frag_hdr),
1784 .setsockopt = ipv6_setsockopt,
1785 .getsockopt = ipv6_getsockopt,
1786 .addr2sockaddr = inet6_csk_addr2sockaddr,
1787 .sockaddr_len = sizeof(struct sockaddr_in6),
1788 .bind_conflict = inet6_csk_bind_conflict,
1789#ifdef CONFIG_COMPAT
1790 .compat_setsockopt = compat_ipv6_setsockopt,
1791 .compat_getsockopt = compat_ipv6_getsockopt,
1792#endif
1793};
1794
1795#ifdef CONFIG_TCP_MD5SIG
1796static const struct tcp_sock_af_ops tcp_sock_ipv6_specific = {
1797 .md5_lookup = tcp_v6_md5_lookup,
1798 .calc_md5_hash = tcp_v6_md5_hash_skb,
1799 .md5_parse = tcp_v6_parse_md5_keys,
1800};
1801#endif
1802
1803/*
1804 * TCP over IPv4 via INET6 API
1805 */
1806
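/*
 * Same role as ipv6_specific, but for AF_INET6 sockets that talk to a
 * v4-mapped peer: transmit, checksum and header handling go through
 * the IPv4 routines while the sockopt entry points stay IPv6, so the
 * API the application sees does not change.
 */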
1807static const struct inet_connection_sock_af_ops ipv6_mapped = {
1808 .queue_xmit = ip_queue_xmit,
1809 .send_check = tcp_v4_send_check,
1810 .rebuild_header = inet_sk_rebuild_header,
1811 .conn_request = tcp_v6_conn_request,
1812 .syn_recv_sock = tcp_v6_syn_recv_sock,
1813 .get_peer = tcp_v4_get_peer,
1814 .net_header_len = sizeof(struct iphdr),
1815 .setsockopt = ipv6_setsockopt,
1816 .getsockopt = ipv6_getsockopt,
1817 .addr2sockaddr = inet6_csk_addr2sockaddr,
1818 .sockaddr_len = sizeof(struct sockaddr_in6),
1819 .bind_conflict = inet6_csk_bind_conflict,
1820#ifdef CONFIG_COMPAT
1821 .compat_setsockopt = compat_ipv6_setsockopt,
1822 .compat_getsockopt = compat_ipv6_getsockopt,
1823#endif
1824};
1825
1826#ifdef CONFIG_TCP_MD5SIG
1827static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific = {
1828 .md5_lookup = tcp_v4_md5_lookup,
1829 .calc_md5_hash = tcp_v4_md5_hash_skb,
1830 .md5_parse = tcp_v6_parse_md5_keys,
1831};
1832#endif
1833
1834/* NOTE: A lot of things set to zero explicitly by call to
1835 * sk_alloc() so need not be done here.
1836 */
1837static int tcp_v6_init_sock(struct sock *sk)
1838{
1839 struct inet_connection_sock *icsk = inet_csk(sk);
1840
1841 tcp_init_sock(sk);
1842
1843 icsk->icsk_af_ops = &ipv6_specific;
1844
1845#ifdef CONFIG_TCP_MD5SIG
1846 tcp_sk(sk)->af_specific = &tcp_sock_ipv6_specific;
1847#endif
1848
1849 return 0;
1850}
1851
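/*
 * Socket teardown: run the common TCP destructor first, then release
 * the IPv6-specific socket state.
 */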
1852static void tcp_v6_destroy_sock(struct sock *sk)
1853{
1854 tcp_v4_destroy_sock(sk);
1855 inet6_destroy_sock(sk);
1856}
1857
1858#ifdef CONFIG_PROC_FS
1859/* Proc filesystem TCPv6 sock list dumping. */
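/*
 * Emit one /proc/net/tcp6 row for a request socket still in SYN_RECV.
 * Addresses and ports come from the inet6/inet request sock; the only
 * active timer is the expire timer, and open requests have no inode.
 */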
1860static void get_openreq6(struct seq_file *seq,
1861 const struct sock *sk, struct request_sock *req, int i, int uid)
1862{
1863 int ttd = req->expires - jiffies;
1864 const struct in6_addr *src = &inet6_rsk(req)->loc_addr;
1865 const struct in6_addr *dest = &inet6_rsk(req)->rmt_addr;
1866
1867 if (ttd < 0)
1868 ttd = 0;
1869
1870 seq_printf(seq,
1871 "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
1872 "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK\n",
1873 i,
1874 src->s6_addr32[0], src->s6_addr32[1],
1875 src->s6_addr32[2], src->s6_addr32[3],
1876 ntohs(inet_rsk(req)->loc_port),
1877 dest->s6_addr32[0], dest->s6_addr32[1],
1878 dest->s6_addr32[2], dest->s6_addr32[3],
1879 ntohs(inet_rsk(req)->rmt_port),
1880 TCP_SYN_RECV,
1881 0,0, /* could print option size, but that is af dependent. */
1882 1, /* timers active (only the expire timer) */
1883 jiffies_to_clock_t(ttd),
1884 req->retrans,
1885 uid,
1886 0, /* non standard timer */
1887 0, /* open_requests have no inode */
1888 0, req);
1889}
1890
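/*
 * Emit one /proc/net/tcp6 row for a full socket: 128-bit addresses in
 * hex, send/receive queue sizes, the pending timer (retransmit, probe
 * or keepalive), uid, inode, refcount and a few congestion control
 * fields.
 */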
1891static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
1892{
1893 const struct in6_addr *dest, *src;
1894 __u16 destp, srcp;
1895 int timer_active;
1896 unsigned long timer_expires;
1897 const struct inet_sock *inet = inet_sk(sp);
1898 const struct tcp_sock *tp = tcp_sk(sp);
1899 const struct inet_connection_sock *icsk = inet_csk(sp);
1900 const struct ipv6_pinfo *np = inet6_sk(sp);
1901
1902 dest = &np->daddr;
1903 src = &np->rcv_saddr;
1904 destp = ntohs(inet->inet_dport);
1905 srcp = ntohs(inet->inet_sport);
1906
1907 if (icsk->icsk_pending == ICSK_TIME_RETRANS) {
1908 timer_active = 1;
1909 timer_expires = icsk->icsk_timeout;
1910 } else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
1911 timer_active = 4;
1912 timer_expires = icsk->icsk_timeout;
1913 } else if (timer_pending(&sp->sk_timer)) {
1914 timer_active = 2;
1915 timer_expires = sp->sk_timer.expires;
1916 } else {
1917 timer_active = 0;
1918 timer_expires = jiffies;
1919 }
1920
1921 seq_printf(seq,
1922 "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
1923 "%02X %08X:%08X %02X:%08lX %08X %5d %8d %lu %d %pK %lu %lu %u %u %d\n",
1924 i,
1925 src->s6_addr32[0], src->s6_addr32[1],
1926 src->s6_addr32[2], src->s6_addr32[3], srcp,
1927 dest->s6_addr32[0], dest->s6_addr32[1],
1928 dest->s6_addr32[2], dest->s6_addr32[3], destp,
1929 sp->sk_state,
1930 tp->write_seq-tp->snd_una,
1931 (sp->sk_state == TCP_LISTEN) ? sp->sk_ack_backlog : (tp->rcv_nxt - tp->copied_seq),
1932 timer_active,
1933 jiffies_to_clock_t(timer_expires - jiffies),
1934 icsk->icsk_retransmits,
1935 sock_i_uid(sp),
1936 icsk->icsk_probes_out,
1937 sock_i_ino(sp),
1938 atomic_read(&sp->sk_refcnt), sp,
1939 jiffies_to_clock_t(icsk->icsk_rto),
1940 jiffies_to_clock_t(icsk->icsk_ack.ato),
1941 (icsk->icsk_ack.quick << 1 ) | icsk->icsk_ack.pingpong,
1942 tp->snd_cwnd,
1943 tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh
1944 );
1945}
1946
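/*
 * Emit one /proc/net/tcp6 row for a TIME_WAIT socket.  The remaining
 * lifetime is derived from tw_ttd and clamped at zero; most per-socket
 * fields do not apply and are printed as 0.
 */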
1947static void get_timewait6_sock(struct seq_file *seq,
1948 struct inet_timewait_sock *tw, int i)
1949{
1950 const struct in6_addr *dest, *src;
1951 __u16 destp, srcp;
1952 const struct inet6_timewait_sock *tw6 = inet6_twsk((struct sock *)tw);
1953 int ttd = tw->tw_ttd - jiffies;
1954
1955 if (ttd < 0)
1956 ttd = 0;
1957
1958 dest = &tw6->tw_v6_daddr;
1959 src = &tw6->tw_v6_rcv_saddr;
1960 destp = ntohs(tw->tw_dport);
1961 srcp = ntohs(tw->tw_sport);
1962
1963 seq_printf(seq,
1964 "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
1965 "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK\n",
1966 i,
1967 src->s6_addr32[0], src->s6_addr32[1],
1968 src->s6_addr32[2], src->s6_addr32[3], srcp,
1969 dest->s6_addr32[0], dest->s6_addr32[1],
1970 dest->s6_addr32[2], dest->s6_addr32[3], destp,
1971 tw->tw_substate, 0, 0,
1972 3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
1973 atomic_read(&tw->tw_refcnt), tw);
1974}
1975
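/*
 * seq_file ->show() callback: print the column header for the start
 * token, then dispatch on the iterator state to the listening/
 * established, open-request or time-wait formatter above.
 */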
1976static int tcp6_seq_show(struct seq_file *seq, void *v)
1977{
1978 struct tcp_iter_state *st;
1979
1980 if (v == SEQ_START_TOKEN) {
1981 seq_puts(seq,
1982 " sl "
1983 "local_address "
1984 "remote_address "
1985 "st tx_queue rx_queue tr tm->when retrnsmt"
1986 " uid timeout inode\n");
1987 goto out;
1988 }
1989 st = seq->private;
1990
1991 switch (st->state) {
1992 case TCP_SEQ_STATE_LISTENING:
1993 case TCP_SEQ_STATE_ESTABLISHED:
1994 get_tcp6_sock(seq, v, st->num);
1995 break;
1996 case TCP_SEQ_STATE_OPENREQ:
1997 get_openreq6(seq, st->syn_wait_sk, v, st->num, st->uid);
1998 break;
1999 case TCP_SEQ_STATE_TIME_WAIT:
2000 get_timewait6_sock(seq, v, st->num);
2001 break;
2002 }
2003out:
2004 return 0;
2005}
2006
2007static const struct file_operations tcp6_afinfo_seq_fops = {
2008 .owner = THIS_MODULE,
2009 .open = tcp_seq_open,
2010 .read = seq_read,
2011 .llseek = seq_lseek,
2012 .release = seq_release_net
2013};
2014
2015static struct tcp_seq_afinfo tcp6_seq_afinfo = {
2016 .name = "tcp6",
2017 .family = AF_INET6,
2018 .seq_fops = &tcp6_afinfo_seq_fops,
2019 .seq_ops = {
2020 .show = tcp6_seq_show,
2021 },
2022};
2023
2024int __net_init tcp6_proc_init(struct net *net)
2025{
2026 return tcp_proc_register(net, &tcp6_seq_afinfo);
2027}
2028
2029void tcp6_proc_exit(struct net *net)
2030{
2031 tcp_proc_unregister(net, &tcp6_seq_afinfo);
2032}
2033#endif
2034
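/*
 * Protocol descriptor for AF_INET6 stream sockets.  Most entry points
 * are the generic TCP ones; the IPv6-specific pieces are connect,
 * init/destroy, hashing and the backlog receive handler.  Hash tables,
 * memory accounting and the timewait/request sock ops parallel the
 * IPv4 implementation.
 */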
2035struct proto tcpv6_prot = {
2036 .name = "TCPv6",
2037 .owner = THIS_MODULE,
2038 .close = tcp_close,
2039 .connect = tcp_v6_connect,
2040 .disconnect = tcp_disconnect,
2041 .accept = inet_csk_accept,
2042 .ioctl = tcp_ioctl,
2043 .init = tcp_v6_init_sock,
2044 .destroy = tcp_v6_destroy_sock,
2045 .shutdown = tcp_shutdown,
2046 .setsockopt = tcp_setsockopt,
2047 .getsockopt = tcp_getsockopt,
2048 .recvmsg = tcp_recvmsg,
2049 .sendmsg = tcp_sendmsg,
2050 .sendpage = tcp_sendpage,
2051 .backlog_rcv = tcp_v6_do_rcv,
2052 .hash = tcp_v6_hash,
2053 .unhash = inet_unhash,
2054 .get_port = inet_csk_get_port,
2055 .enter_memory_pressure = tcp_enter_memory_pressure,
2056 .sockets_allocated = &tcp_sockets_allocated,
2057 .memory_allocated = &tcp_memory_allocated,
2058 .memory_pressure = &tcp_memory_pressure,
2059 .orphan_count = &tcp_orphan_count,
2060 .sysctl_wmem = sysctl_tcp_wmem,
2061 .sysctl_rmem = sysctl_tcp_rmem,
2062 .max_header = MAX_TCP_HEADER,
2063 .obj_size = sizeof(struct tcp6_sock),
2064 .slab_flags = SLAB_DESTROY_BY_RCU,
2065 .twsk_prot = &tcp6_timewait_sock_ops,
2066 .rsk_prot = &tcp6_request_sock_ops,
2067 .h.hashinfo = &tcp_hashinfo,
2068 .no_autobind = true,
2069#ifdef CONFIG_COMPAT
2070 .compat_setsockopt = compat_tcp_setsockopt,
2071 .compat_getsockopt = compat_tcp_getsockopt,
2072#endif
2073#ifdef CONFIG_CGROUP_MEM_RES_CTLR_KMEM
2074 .proto_cgroup = tcp_proto_cgroup,
2075#endif
2076};
2077
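/*
 * inet6 protocol handler registered for IPPROTO_TCP: incoming segments
 * go to tcp_v6_rcv(), ICMPv6 errors to tcp_v6_err(), and the GSO/GRO
 * callbacks provide segmentation offload for IPv6 TCP.
 */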
2078static const struct inet6_protocol tcpv6_protocol = {
2079 .handler = tcp_v6_rcv,
2080 .err_handler = tcp_v6_err,
2081 .gso_send_check = tcp_v6_gso_send_check,
2082 .gso_segment = tcp_tso_segment,
2083 .gro_receive = tcp6_gro_receive,
2084 .gro_complete = tcp6_gro_complete,
2085 .flags = INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
2086};
2087
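/* Maps SOCK_STREAM/IPPROTO_TCP on the inet6 family to tcpv6_prot and
 * the inet6 stream socket operations.
 */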
2088static struct inet_protosw tcpv6_protosw = {
2089 .type = SOCK_STREAM,
2090 .protocol = IPPROTO_TCP,
2091 .prot = &tcpv6_prot,
2092 .ops = &inet6_stream_ops,
2093 .no_check = 0,
2094 .flags = INET_PROTOSW_PERMANENT |
2095 INET_PROTOSW_ICSK,
2096};
2097
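/*
 * Per-namespace setup and teardown: create the kernel control socket
 * used by the transmit helpers earlier in this file when sending
 * resets and ACKs for packets that have no socket, and destroy it on
 * namespace exit.  The batched exit purges any remaining IPv6
 * timewait sockets.
 */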
2098static int __net_init tcpv6_net_init(struct net *net)
2099{
2100 return inet_ctl_sock_create(&net->ipv6.tcp_sk, PF_INET6,
2101 SOCK_RAW, IPPROTO_TCP, net);
2102}
2103
2104static void __net_exit tcpv6_net_exit(struct net *net)
2105{
2106 inet_ctl_sock_destroy(net->ipv6.tcp_sk);
2107}
2108
2109static void __net_exit tcpv6_net_exit_batch(struct list_head *net_exit_list)
2110{
2111 inet_twsk_purge(&tcp_hashinfo, &tcp_death_row, AF_INET6);
2112}
2113
2114static struct pernet_operations tcpv6_net_ops = {
2115 .init = tcpv6_net_init,
2116 .exit = tcpv6_net_exit,
2117 .exit_batch = tcpv6_net_exit_batch,
2118};
2119
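/*
 * Module init: register the IPPROTO_TCP handler, the stream protosw
 * and the per-namespace ops, in that order.  Each error label below
 * unwinds exactly the steps that completed before the failure.
 */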
2120int __init tcpv6_init(void)
2121{
2122 int ret;
2123
2124 ret = inet6_add_protocol(&tcpv6_protocol, IPPROTO_TCP);
2125 if (ret)
2126 goto out;
2127
2128 /* register inet6 protocol */
2129 ret = inet6_register_protosw(&tcpv6_protosw);
2130 if (ret)
2131 goto out_tcpv6_protocol;
2132
2133 ret = register_pernet_subsys(&tcpv6_net_ops);
2134 if (ret)
2135 goto out_tcpv6_protosw;
2136out:
2137 return ret;
2138
2139out_tcpv6_protosw:
2140	inet6_unregister_protosw(&tcpv6_protosw);
2141out_tcpv6_protocol:
2142	inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
2143	goto out;
2144}
2145
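/*
 * Module unload: unwind the registrations from tcpv6_init() in reverse
 * order.
 */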
2146void tcpv6_exit(void)
2147{
2148 unregister_pernet_subsys(&tcpv6_net_ops);
2149 inet6_unregister_protosw(&tcpv6_protosw);
2150 inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
2151}