1// SPDX-License-Identifier: GPL-2.0-or-later
2/*
3 * INET An implementation of the TCP/IP protocol suite for the LINUX
4 * operating system. INET is implemented using the BSD Socket
5 * interface as the means of communication with the user level.
6 *
7 * Implementation of the Transmission Control Protocol(TCP).
8 *
9 * IPv4 specific functions
10 *
11 * code split from:
12 * linux/ipv4/tcp.c
13 * linux/ipv4/tcp_input.c
14 * linux/ipv4/tcp_output.c
15 *
16 * See tcp.c for author information
17 */
18
19/*
20 * Changes:
21 * David S. Miller : New socket lookup architecture.
22 * This code is dedicated to John Dyson.
23 * David S. Miller : Change semantics of established hash,
24 * half is devoted to TIME_WAIT sockets
25 * and the rest go in the other half.
26 * Andi Kleen : Add support for syncookies and fixed
27 * some bugs: ip options weren't passed to
28 * the TCP layer, missed a check for an
29 * ACK bit.
30 * Andi Kleen : Implemented fast path mtu discovery.
31 * Fixed many serious bugs in the
32 * request_sock handling and moved
33 * most of it into the af independent code.
34 * Added tail drop and some other bugfixes.
35 * Added new listen semantics.
36 * Mike McLagan : Routing by source
37 * Juan Jose Ciarlante: ip_dynaddr bits
38 * Andi Kleen: various fixes.
39 * Vitaly E. Lavrov : Transparent proxy revived after year
40 * coma.
41 * Andi Kleen : Fix new listen.
42 * Andi Kleen : Fix accept error reporting.
43 * YOSHIFUJI Hideaki @USAGI and: Support IPV6_V6ONLY socket option, which
44 * Alexey Kuznetsov allows both IPv4 and IPv6 sockets to bind
45 * a single port at the same time.
46 */
47
48#define pr_fmt(fmt) "TCP: " fmt
49
50#include <linux/bottom_half.h>
51#include <linux/types.h>
52#include <linux/fcntl.h>
53#include <linux/module.h>
54#include <linux/random.h>
55#include <linux/cache.h>
56#include <linux/jhash.h>
57#include <linux/init.h>
58#include <linux/times.h>
59#include <linux/slab.h>
60
61#include <net/net_namespace.h>
62#include <net/icmp.h>
63#include <net/inet_hashtables.h>
64#include <net/tcp.h>
65#include <net/transp_v6.h>
66#include <net/ipv6.h>
67#include <net/inet_common.h>
68#include <net/timewait_sock.h>
69#include <net/xfrm.h>
70#include <net/secure_seq.h>
71#include <net/busy_poll.h>
72
73#include <linux/inet.h>
74#include <linux/ipv6.h>
75#include <linux/stddef.h>
76#include <linux/proc_fs.h>
77#include <linux/seq_file.h>
78#include <linux/inetdevice.h>
79
80#include <crypto/hash.h>
81#include <linux/scatterlist.h>
82
83#include <trace/events/tcp.h>
84
85#ifdef CONFIG_TCP_MD5SIG
86static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
87 __be32 daddr, __be32 saddr, const struct tcphdr *th);
88#endif
89
90struct inet_hashinfo tcp_hashinfo;
91EXPORT_SYMBOL(tcp_hashinfo);
92
93static u32 tcp_v4_init_seq(const struct sk_buff *skb)
94{
95 return secure_tcp_seq(ip_hdr(skb)->daddr,
96 ip_hdr(skb)->saddr,
97 tcp_hdr(skb)->dest,
98 tcp_hdr(skb)->source);
99}
100
101static u32 tcp_v4_init_ts_off(const struct net *net, const struct sk_buff *skb)
102{
103 return secure_tcp_ts_off(net, ip_hdr(skb)->daddr, ip_hdr(skb)->saddr);
104}
105
106int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp)
107{
108 const struct inet_timewait_sock *tw = inet_twsk(sktw);
109 const struct tcp_timewait_sock *tcptw = tcp_twsk(sktw);
110 struct tcp_sock *tp = tcp_sk(sk);
111 int reuse = sock_net(sk)->ipv4.sysctl_tcp_tw_reuse;
112
113 if (reuse == 2) {
114 /* Still does not detect *everything* that goes through
115 * lo, since we require a loopback src or dst address
116 * or direct binding to 'lo' interface.
117 */
118 bool loopback = false;
119 if (tw->tw_bound_dev_if == LOOPBACK_IFINDEX)
120 loopback = true;
121#if IS_ENABLED(CONFIG_IPV6)
122 if (tw->tw_family == AF_INET6) {
123 if (ipv6_addr_loopback(&tw->tw_v6_daddr) ||
124 (ipv6_addr_v4mapped(&tw->tw_v6_daddr) &&
125 (tw->tw_v6_daddr.s6_addr[12] == 127)) ||
126 ipv6_addr_loopback(&tw->tw_v6_rcv_saddr) ||
127 (ipv6_addr_v4mapped(&tw->tw_v6_rcv_saddr) &&
128 (tw->tw_v6_rcv_saddr.s6_addr[12] == 127)))
129 loopback = true;
130 } else
131#endif
132 {
133 if (ipv4_is_loopback(tw->tw_daddr) ||
134 ipv4_is_loopback(tw->tw_rcv_saddr))
135 loopback = true;
136 }
137 if (!loopback)
138 reuse = 0;
139 }
140
141 /* With PAWS, it is safe from the viewpoint
142 of data integrity. Even without PAWS it is safe provided sequence
143 spaces do not overlap i.e. at data rates <= 80Mbit/sec.
144
145 Actually, the idea is close to VJ's one, only the timestamp cache is
146 held not per host but per port pair, and the TW bucket is used as the
147 state holder.
148
149 If the TW bucket has already been destroyed we fall back to VJ's scheme
150 and use the initial timestamp retrieved from the peer table.
151 */
152 if (tcptw->tw_ts_recent_stamp &&
153 (!twp || (reuse && time_after32(ktime_get_seconds(),
154 tcptw->tw_ts_recent_stamp)))) {
155 /* In case of repair and re-using TIME-WAIT sockets we still
156 * want to be sure that it is safe as above but honor the
157 * sequence numbers and time stamps set as part of the repair
158 * process.
159 *
160 * Without this check re-using a TIME-WAIT socket with TCP
161 * repair would accumulate a -1 on the repair assigned
162 * sequence number. The first time it is reused the sequence
163 * is -1, the second time -2, etc. This fixes that issue
164 * without appearing to create any others.
165 */
166 if (likely(!tp->repair)) {
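			/* Step past the old connection's send space: advancing
			 * tw_snd_nxt by 65535 + 2 presumably keeps the new ISN
			 * beyond any window the previous incarnation's peer
			 * could still accept.
			 */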
167 u32 seq = tcptw->tw_snd_nxt + 65535 + 2;
168
169 if (!seq)
170 seq = 1;
171 WRITE_ONCE(tp->write_seq, seq);
172 tp->rx_opt.ts_recent = tcptw->tw_ts_recent;
173 tp->rx_opt.ts_recent_stamp = tcptw->tw_ts_recent_stamp;
174 }
175 sock_hold(sktw);
176 return 1;
177 }
178
179 return 0;
180}
181EXPORT_SYMBOL_GPL(tcp_twsk_unique);
182
183static int tcp_v4_pre_connect(struct sock *sk, struct sockaddr *uaddr,
184 int addr_len)
185{
186 /* This check is replicated from tcp_v4_connect() and intended to
187 * prevent the BPF program called below from accessing bytes that are
188 * outside of the bound specified by the user in addr_len.
189 */
190 if (addr_len < sizeof(struct sockaddr_in))
191 return -EINVAL;
192
193 sock_owned_by_me(sk);
194
195 return BPF_CGROUP_RUN_PROG_INET4_CONNECT(sk, uaddr);
196}
197
198/* This will initiate an outgoing connection. */
199int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
200{
201 struct sockaddr_in *usin = (struct sockaddr_in *)uaddr;
202 struct inet_sock *inet = inet_sk(sk);
203 struct tcp_sock *tp = tcp_sk(sk);
204 __be16 orig_sport, orig_dport;
205 __be32 daddr, nexthop;
206 struct flowi4 *fl4;
207 struct rtable *rt;
208 int err;
209 struct ip_options_rcu *inet_opt;
210 struct inet_timewait_death_row *tcp_death_row = &sock_net(sk)->ipv4.tcp_death_row;
211
212 if (addr_len < sizeof(struct sockaddr_in))
213 return -EINVAL;
214
215 if (usin->sin_family != AF_INET)
216 return -EAFNOSUPPORT;
217
218 nexthop = daddr = usin->sin_addr.s_addr;
219 inet_opt = rcu_dereference_protected(inet->inet_opt,
220 lockdep_sock_is_held(sk));
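	/* With a source-routing option, the first hop (faddr) is the routing
	 * target, not the final destination.
	 */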
221 if (inet_opt && inet_opt->opt.srr) {
222 if (!daddr)
223 return -EINVAL;
224 nexthop = inet_opt->opt.faddr;
225 }
226
227 orig_sport = inet->inet_sport;
228 orig_dport = usin->sin_port;
229 fl4 = &inet->cork.fl.u.ip4;
230 rt = ip_route_connect(fl4, nexthop, inet->inet_saddr,
231 RT_CONN_FLAGS(sk), sk->sk_bound_dev_if,
232 IPPROTO_TCP,
233 orig_sport, orig_dport, sk);
234 if (IS_ERR(rt)) {
235 err = PTR_ERR(rt);
236 if (err == -ENETUNREACH)
237 IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTNOROUTES);
238 return err;
239 }
240
241 if (rt->rt_flags & (RTCF_MULTICAST | RTCF_BROADCAST)) {
242 ip_rt_put(rt);
243 return -ENETUNREACH;
244 }
245
246 if (!inet_opt || !inet_opt->opt.srr)
247 daddr = fl4->daddr;
248
249 if (!inet->inet_saddr)
250 inet->inet_saddr = fl4->saddr;
251 sk_rcv_saddr_set(sk, inet->inet_saddr);
252
253 if (tp->rx_opt.ts_recent_stamp && inet->inet_daddr != daddr) {
254 /* Reset inherited state */
255 tp->rx_opt.ts_recent = 0;
256 tp->rx_opt.ts_recent_stamp = 0;
257 if (likely(!tp->repair))
258 WRITE_ONCE(tp->write_seq, 0);
259 }
260
261 inet->inet_dport = usin->sin_port;
262 sk_daddr_set(sk, daddr);
263
264 inet_csk(sk)->icsk_ext_hdr_len = 0;
265 if (inet_opt)
266 inet_csk(sk)->icsk_ext_hdr_len = inet_opt->opt.optlen;
267
268 tp->rx_opt.mss_clamp = TCP_MSS_DEFAULT;
269
270 /* Socket identity is still unknown (sport may be zero).
271 * However we set state to SYN-SENT and, without releasing the socket
272 * lock, select a source port, enter ourselves into the hash tables and
273 * complete initialization after this.
274 */
275 tcp_set_state(sk, TCP_SYN_SENT);
276 err = inet_hash_connect(tcp_death_row, sk);
277 if (err)
278 goto failure;
279
280 sk_set_txhash(sk);
281
282 rt = ip_route_newports(fl4, rt, orig_sport, orig_dport,
283 inet->inet_sport, inet->inet_dport, sk);
284 if (IS_ERR(rt)) {
285 err = PTR_ERR(rt);
286 rt = NULL;
287 goto failure;
288 }
289 /* OK, now commit destination to socket. */
290 sk->sk_gso_type = SKB_GSO_TCPV4;
291 sk_setup_caps(sk, &rt->dst);
292 rt = NULL;
293
294 if (likely(!tp->repair)) {
295 if (!tp->write_seq)
296 WRITE_ONCE(tp->write_seq,
297 secure_tcp_seq(inet->inet_saddr,
298 inet->inet_daddr,
299 inet->inet_sport,
300 usin->sin_port));
301 tp->tsoffset = secure_tcp_ts_off(sock_net(sk),
302 inet->inet_saddr,
303 inet->inet_daddr);
304 }
305
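	/* Start the IP ID sequence at a random value; presumably this avoids
	 * a predictable, connection-derived ID sequence.
	 */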
306 inet->inet_id = prandom_u32();
307
308 if (tcp_fastopen_defer_connect(sk, &err))
309 return err;
310 if (err)
311 goto failure;
312
313 err = tcp_connect(sk);
314
315 if (err)
316 goto failure;
317
318 return 0;
319
320failure:
321 /*
322 * This unhashes the socket and releases the local port,
323 * if necessary.
324 */
325 tcp_set_state(sk, TCP_CLOSE);
326 ip_rt_put(rt);
327 sk->sk_route_caps = 0;
328 inet->inet_dport = 0;
329 return err;
330}
331EXPORT_SYMBOL(tcp_v4_connect);
332
333/*
334 * This routine reacts to ICMP_FRAG_NEEDED MTU indications as defined in RFC 1191.
335 * It can be called through tcp_release_cb() if the socket was owned by the user
336 * at the time tcp_v4_err() was called to handle the ICMP message.
337 */
338void tcp_v4_mtu_reduced(struct sock *sk)
339{
340 struct inet_sock *inet = inet_sk(sk);
341 struct dst_entry *dst;
342 u32 mtu;
343
344 if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))
345 return;
346 mtu = tcp_sk(sk)->mtu_info;
347 dst = inet_csk_update_pmtu(sk, mtu);
348 if (!dst)
349 return;
350
351 /* Something is about to go wrong... Remember the soft error
352 * in case this connection is not able to recover.
353 */
354 if (mtu < dst_mtu(dst) && ip_dont_fragment(sk, dst))
355 sk->sk_err_soft = EMSGSIZE;
356
357 mtu = dst_mtu(dst);
358
359 if (inet->pmtudisc != IP_PMTUDISC_DONT &&
360 ip_sk_accept_pmtu(sk) &&
361 inet_csk(sk)->icsk_pmtu_cookie > mtu) {
362 tcp_sync_mss(sk, mtu);
363
364 /* Resend the TCP packet because it's
365 * clear that the old packet has been
366 * dropped. This is the new "fast" path mtu
367 * discovery.
368 */
369 tcp_simple_retransmit(sk);
370 } /* else let the usual retransmit timer handle it */
371}
372EXPORT_SYMBOL(tcp_v4_mtu_reduced);
373
374static void do_redirect(struct sk_buff *skb, struct sock *sk)
375{
376 struct dst_entry *dst = __sk_dst_check(sk, 0);
377
378 if (dst)
379 dst->ops->redirect(dst, sk, skb);
380}
381
382
383/* handle ICMP messages on TCP_NEW_SYN_RECV request sockets */
384void tcp_req_err(struct sock *sk, u32 seq, bool abort)
385{
386 struct request_sock *req = inet_reqsk(sk);
387 struct net *net = sock_net(sk);
388
389 /* ICMPs are not backlogged, hence we cannot get
390 * an established socket here.
391 */
392 if (seq != tcp_rsk(req)->snt_isn) {
393 __NET_INC_STATS(net, LINUX_MIB_OUTOFWINDOWICMPS);
394 } else if (abort) {
395 /*
396 * Still in SYN_RECV, just remove it silently.
397 * There is no good way to pass the error to the newly
398 * created socket, and POSIX does not want network
399 * errors returned from accept().
400 */
401 inet_csk_reqsk_queue_drop(req->rsk_listener, req);
402 tcp_listendrop(req->rsk_listener);
403 }
404 reqsk_put(req);
405}
406EXPORT_SYMBOL(tcp_req_err);
407
408/*
409 * This routine is called by the ICMP module when it gets some
410 * sort of error condition. If err < 0 then the socket should
411 * be closed and the error returned to the user. If err > 0
412 * it's just the icmp type << 8 | icmp code. After adjustment
413 * header points to the first 8 bytes of the tcp header. We need
414 * to find the appropriate port.
415 *
416 * The locking strategy used here is very "optimistic". When
417 * someone else accesses the socket the ICMP is just dropped
418 * and for some paths there is no check at all.
419 * A more general error queue to queue errors for later handling
420 * is probably better.
421 *
422 */
423
424int tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
425{
426 const struct iphdr *iph = (const struct iphdr *)icmp_skb->data;
427 struct tcphdr *th = (struct tcphdr *)(icmp_skb->data + (iph->ihl << 2));
428 struct inet_connection_sock *icsk;
429 struct tcp_sock *tp;
430 struct inet_sock *inet;
431 const int type = icmp_hdr(icmp_skb)->type;
432 const int code = icmp_hdr(icmp_skb)->code;
433 struct sock *sk;
434 struct sk_buff *skb;
435 struct request_sock *fastopen;
436 u32 seq, snd_una;
437 s32 remaining;
438 u32 delta_us;
439 int err;
440 struct net *net = dev_net(icmp_skb->dev);
441
442 sk = __inet_lookup_established(net, &tcp_hashinfo, iph->daddr,
443 th->dest, iph->saddr, ntohs(th->source),
444 inet_iif(icmp_skb), 0);
445 if (!sk) {
446 __ICMP_INC_STATS(net, ICMP_MIB_INERRORS);
447 return -ENOENT;
448 }
449 if (sk->sk_state == TCP_TIME_WAIT) {
450 inet_twsk_put(inet_twsk(sk));
451 return 0;
452 }
453 seq = ntohl(th->seq);
454 if (sk->sk_state == TCP_NEW_SYN_RECV) {
455 tcp_req_err(sk, seq, type == ICMP_PARAMETERPROB ||
456 type == ICMP_TIME_EXCEEDED ||
457 (type == ICMP_DEST_UNREACH &&
458 (code == ICMP_NET_UNREACH ||
459 code == ICMP_HOST_UNREACH)));
460 return 0;
461 }
462
463 bh_lock_sock(sk);
464 /* If too many ICMPs get dropped on busy
465 * servers this needs to be solved differently.
466 * We do take care of the PMTU discovery (RFC 1191) special case:
467 * we can receive locally generated ICMP messages while the socket is held.
468 */
469 if (sock_owned_by_user(sk)) {
470 if (!(type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED))
471 __NET_INC_STATS(net, LINUX_MIB_LOCKDROPPEDICMPS);
472 }
473 if (sk->sk_state == TCP_CLOSE)
474 goto out;
475
476 if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
477 __NET_INC_STATS(net, LINUX_MIB_TCPMINTTLDROP);
478 goto out;
479 }
480
481 icsk = inet_csk(sk);
482 tp = tcp_sk(sk);
483 /* XXX (TFO) - tp->snd_una should be ISN (tcp_create_openreq_child() */
484 fastopen = rcu_dereference(tp->fastopen_rsk);
485 snd_una = fastopen ? tcp_rsk(fastopen)->snt_isn : tp->snd_una;
486 if (sk->sk_state != TCP_LISTEN &&
487 !between(seq, snd_una, tp->snd_nxt)) {
488 __NET_INC_STATS(net, LINUX_MIB_OUTOFWINDOWICMPS);
489 goto out;
490 }
491
492 switch (type) {
493 case ICMP_REDIRECT:
494 if (!sock_owned_by_user(sk))
495 do_redirect(icmp_skb, sk);
496 goto out;
497 case ICMP_SOURCE_QUENCH:
498 /* Just silently ignore these. */
499 goto out;
500 case ICMP_PARAMETERPROB:
501 err = EPROTO;
502 break;
503 case ICMP_DEST_UNREACH:
504 if (code > NR_ICMP_UNREACH)
505 goto out;
506
507 if (code == ICMP_FRAG_NEEDED) { /* PMTU discovery (RFC1191) */
508 /* We are not interested in TCP_LISTEN and open_requests
509 * (SYN-ACKs sent out by Linux are always <576 bytes so
510 * they should go through unfragmented).
511 */
512 if (sk->sk_state == TCP_LISTEN)
513 goto out;
514
515 tp->mtu_info = info;
516 if (!sock_owned_by_user(sk)) {
517 tcp_v4_mtu_reduced(sk);
518 } else {
519 if (!test_and_set_bit(TCP_MTU_REDUCED_DEFERRED, &sk->sk_tsq_flags))
520 sock_hold(sk);
521 }
522 goto out;
523 }
524
525 err = icmp_err_convert[code].errno;
526 /* check if icmp_skb allows revert of backoff
527 * (see draft-zimmermann-tcp-lcd) */
528 if (code != ICMP_NET_UNREACH && code != ICMP_HOST_UNREACH)
529 break;
530 if (seq != tp->snd_una || !icsk->icsk_retransmits ||
531 !icsk->icsk_backoff || fastopen)
532 break;
533
534 if (sock_owned_by_user(sk))
535 break;
536
537 skb = tcp_rtx_queue_head(sk);
538 if (WARN_ON_ONCE(!skb))
539 break;
540
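		/* Revert one step of exponential backoff and recompute the RTO
		 * from srtt (or the initial timeout); then either re-arm the
		 * retransmit timer with the remaining time or retransmit
		 * immediately if that time has already elapsed.
		 */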
541 icsk->icsk_backoff--;
542 icsk->icsk_rto = tp->srtt_us ? __tcp_set_rto(tp) :
543 TCP_TIMEOUT_INIT;
544 icsk->icsk_rto = inet_csk_rto_backoff(icsk, TCP_RTO_MAX);
545
546
547 tcp_mstamp_refresh(tp);
548 delta_us = (u32)(tp->tcp_mstamp - tcp_skb_timestamp_us(skb));
549 remaining = icsk->icsk_rto -
550 usecs_to_jiffies(delta_us);
551
552 if (remaining > 0) {
553 inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
554 remaining, TCP_RTO_MAX);
555 } else {
556 /* The reverted RTO has already expired for this skb;
557 * retransmit now. */
558 tcp_retransmit_timer(sk);
559 }
560
561 break;
562 case ICMP_TIME_EXCEEDED:
563 err = EHOSTUNREACH;
564 break;
565 default:
566 goto out;
567 }
568
569 switch (sk->sk_state) {
570 case TCP_SYN_SENT:
571 case TCP_SYN_RECV:
572 /* Only in fast or simultaneous open. If a fast open socket is
573 * already accepted it is treated as a connected one below.
574 */
575 if (fastopen && !fastopen->sk)
576 break;
577
578 if (!sock_owned_by_user(sk)) {
579 sk->sk_err = err;
580
581 sk->sk_error_report(sk);
582
583 tcp_done(sk);
584 } else {
585 sk->sk_err_soft = err;
586 }
587 goto out;
588 }
589
590 /* If we've already connected we will keep trying
591 * until we time out, or the user gives up.
592 *
593 * RFC 1122 4.2.3.9 allows us to treat as hard errors
594 * only PROTO_UNREACH and PORT_UNREACH (well, FRAG_FAILED too,
595 * but it is obsoleted by PMTU discovery).
596 *
597 * Note that in the modern internet, where routing is unreliable
598 * and broken firewalls sit in every dark corner sending random
599 * errors ordered by their masters, even these two messages finally lose
600 * their original sense (even Linux sends invalid PORT_UNREACHs).
601 *
602 * Now we are in compliance with RFCs.
603 * --ANK (980905)
604 */
605
606 inet = inet_sk(sk);
607 if (!sock_owned_by_user(sk) && inet->recverr) {
608 sk->sk_err = err;
609 sk->sk_error_report(sk);
610 } else { /* Only an error on timeout */
611 sk->sk_err_soft = err;
612 }
613
614out:
615 bh_unlock_sock(sk);
616 sock_put(sk);
617 return 0;
618}
619
620void __tcp_v4_send_check(struct sk_buff *skb, __be32 saddr, __be32 daddr)
621{
622 struct tcphdr *th = tcp_hdr(skb);
623
624 th->check = ~tcp_v4_check(skb->len, saddr, daddr, 0);
625 skb->csum_start = skb_transport_header(skb) - skb->head;
626 skb->csum_offset = offsetof(struct tcphdr, check);
627}
628
629/* This routine computes an IPv4 TCP checksum. */
630void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb)
631{
632 const struct inet_sock *inet = inet_sk(sk);
633
634 __tcp_v4_send_check(skb, inet->inet_saddr, inet->inet_daddr);
635}
636EXPORT_SYMBOL(tcp_v4_send_check);
637
638/*
639 * This routine will send an RST to the other tcp.
640 *
641 * Someone asks: why do I NEVER use socket parameters (TOS, TTL etc.)
642 * for the reset?
643 * Answer: if a packet caused the RST, it is not for a socket
644 * existing in our system; if it is matched to a socket,
645 * it is just a duplicate segment or a bug in the other side's TCP.
646 * So we build the reply based only on parameters
647 * that arrived with the segment.
648 * Exception: precedence violation. We do not implement it in any case.
649 */
650
651static void tcp_v4_send_reset(const struct sock *sk, struct sk_buff *skb)
652{
653 const struct tcphdr *th = tcp_hdr(skb);
654 struct {
655 struct tcphdr th;
656#ifdef CONFIG_TCP_MD5SIG
657 __be32 opt[(TCPOLEN_MD5SIG_ALIGNED >> 2)];
658#endif
659 } rep;
660 struct ip_reply_arg arg;
661#ifdef CONFIG_TCP_MD5SIG
662 struct tcp_md5sig_key *key = NULL;
663 const __u8 *hash_location = NULL;
664 unsigned char newhash[16];
665 int genhash;
666 struct sock *sk1 = NULL;
667#endif
668 u64 transmit_time = 0;
669 struct sock *ctl_sk;
670 struct net *net;
671
672 /* Never send a reset in response to a reset. */
673 if (th->rst)
674 return;
675
676 /* If sk is not NULL, it means we did a successful lookup and the incoming
677 * route had to be correct. prequeue might have dropped our dst.
678 */
679 if (!sk && skb_rtable(skb)->rt_type != RTN_LOCAL)
680 return;
681
682 /* Swap the send and the receive. */
683 memset(&rep, 0, sizeof(rep));
684 rep.th.dest = th->source;
685 rep.th.source = th->dest;
686 rep.th.doff = sizeof(struct tcphdr) / 4;
687 rep.th.rst = 1;
688
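	/* Per RFC 793 reset generation: if the offending segment carried an
	 * ACK, answer with SEQ = SEG.ACK and no ACK flag; otherwise send
	 * SEQ = 0 with an ACK covering everything the segment occupied, so
	 * the peer will accept the RST.
	 */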
689 if (th->ack) {
690 rep.th.seq = th->ack_seq;
691 } else {
692 rep.th.ack = 1;
693 rep.th.ack_seq = htonl(ntohl(th->seq) + th->syn + th->fin +
694 skb->len - (th->doff << 2));
695 }
696
697 memset(&arg, 0, sizeof(arg));
698 arg.iov[0].iov_base = (unsigned char *)&rep;
699 arg.iov[0].iov_len = sizeof(rep.th);
700
701 net = sk ? sock_net(sk) : dev_net(skb_dst(skb)->dev);
702#ifdef CONFIG_TCP_MD5SIG
703 rcu_read_lock();
704 hash_location = tcp_parse_md5sig_option(th);
705 if (sk && sk_fullsock(sk)) {
706 key = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)
707 &ip_hdr(skb)->saddr, AF_INET);
708 } else if (hash_location) {
709 /*
710 * The active side is lost. Try to find the listening socket through
711 * the source port, and then find the md5 key through the listening socket.
712 * We are not losing security here:
713 * the incoming packet is checked against the md5 hash of the found key,
714 * and no RST is generated if the md5 hash doesn't match.
715 */
716 sk1 = __inet_lookup_listener(net, &tcp_hashinfo, NULL, 0,
717 ip_hdr(skb)->saddr,
718 th->source, ip_hdr(skb)->daddr,
719 ntohs(th->source), inet_iif(skb),
720 tcp_v4_sdif(skb));
721 /* don't send rst if it can't find key */
722 if (!sk1)
723 goto out;
724
725 key = tcp_md5_do_lookup(sk1, (union tcp_md5_addr *)
726 &ip_hdr(skb)->saddr, AF_INET);
727 if (!key)
728 goto out;
729
730
731 genhash = tcp_v4_md5_hash_skb(newhash, key, NULL, skb);
732 if (genhash || memcmp(hash_location, newhash, 16) != 0)
733 goto out;
734
735 }
736
737 if (key) {
738 rep.opt[0] = htonl((TCPOPT_NOP << 24) |
739 (TCPOPT_NOP << 16) |
740 (TCPOPT_MD5SIG << 8) |
741 TCPOLEN_MD5SIG);
742 /* Update length and the length the header thinks exists */
743 arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED;
744 rep.th.doff = arg.iov[0].iov_len / 4;
745
746 tcp_v4_md5_hash_hdr((__u8 *) &rep.opt[1],
747 key, ip_hdr(skb)->saddr,
748 ip_hdr(skb)->daddr, &rep.th);
749 }
750#endif
751 arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr,
752 ip_hdr(skb)->saddr, /* XXX */
753 arg.iov[0].iov_len, IPPROTO_TCP, 0);
754 arg.csumoffset = offsetof(struct tcphdr, check) / 2;
755 arg.flags = (sk && inet_sk_transparent(sk)) ? IP_REPLY_ARG_NOSRCCHECK : 0;
756
757 /* When the socket is gone, all binding information is lost and
758 * routing might fail in this case. No choice here: if we choose to force
759 * the input interface, we will misroute in the case of an asymmetric route.
760 */
761 if (sk) {
762 arg.bound_dev_if = sk->sk_bound_dev_if;
763 if (sk_fullsock(sk))
764 trace_tcp_send_reset(sk, skb);
765 }
766
767 BUILD_BUG_ON(offsetof(struct sock, sk_bound_dev_if) !=
768 offsetof(struct inet_timewait_sock, tw_bound_dev_if));
769
770 arg.tos = ip_hdr(skb)->tos;
771 arg.uid = sock_net_uid(net, sk && sk_fullsock(sk) ? sk : NULL);
772 local_bh_disable();
773 ctl_sk = this_cpu_read(*net->ipv4.tcp_sk);
774 if (sk) {
775 ctl_sk->sk_mark = (sk->sk_state == TCP_TIME_WAIT) ?
776 inet_twsk(sk)->tw_mark : sk->sk_mark;
777 ctl_sk->sk_priority = (sk->sk_state == TCP_TIME_WAIT) ?
778 inet_twsk(sk)->tw_priority : sk->sk_priority;
779 transmit_time = tcp_transmit_time(sk);
780 }
781 ip_send_unicast_reply(ctl_sk,
782 skb, &TCP_SKB_CB(skb)->header.h4.opt,
783 ip_hdr(skb)->saddr, ip_hdr(skb)->daddr,
784 &arg, arg.iov[0].iov_len,
785 transmit_time);
786
787 ctl_sk->sk_mark = 0;
788 __TCP_INC_STATS(net, TCP_MIB_OUTSEGS);
789 __TCP_INC_STATS(net, TCP_MIB_OUTRSTS);
790 local_bh_enable();
791
792#ifdef CONFIG_TCP_MD5SIG
793out:
794 rcu_read_unlock();
795#endif
796}
797
798/* The code below, which sends ACKs in SYN-RECV and TIME-WAIT states
799 outside of socket context, is certainly ugly. What can I do?
800 */
801
802static void tcp_v4_send_ack(const struct sock *sk,
803 struct sk_buff *skb, u32 seq, u32 ack,
804 u32 win, u32 tsval, u32 tsecr, int oif,
805 struct tcp_md5sig_key *key,
806 int reply_flags, u8 tos)
807{
808 const struct tcphdr *th = tcp_hdr(skb);
809 struct {
810 struct tcphdr th;
811 __be32 opt[(TCPOLEN_TSTAMP_ALIGNED >> 2)
812#ifdef CONFIG_TCP_MD5SIG
813 + (TCPOLEN_MD5SIG_ALIGNED >> 2)
814#endif
815 ];
816 } rep;
817 struct net *net = sock_net(sk);
818 struct ip_reply_arg arg;
819 struct sock *ctl_sk;
820 u64 transmit_time;
821
822 memset(&rep.th, 0, sizeof(struct tcphdr));
823 memset(&arg, 0, sizeof(arg));
824
825 arg.iov[0].iov_base = (unsigned char *)&rep;
826 arg.iov[0].iov_len = sizeof(rep.th);
827 if (tsecr) {
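		/* Timestamp option padded to 12 bytes: two NOPs, then
		 * kind 8 / length 10, followed by TSval and TSecr.
		 */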
828 rep.opt[0] = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
829 (TCPOPT_TIMESTAMP << 8) |
830 TCPOLEN_TIMESTAMP);
831 rep.opt[1] = htonl(tsval);
832 rep.opt[2] = htonl(tsecr);
833 arg.iov[0].iov_len += TCPOLEN_TSTAMP_ALIGNED;
834 }
835
836 /* Swap the send and the receive. */
837 rep.th.dest = th->source;
838 rep.th.source = th->dest;
839 rep.th.doff = arg.iov[0].iov_len / 4;
840 rep.th.seq = htonl(seq);
841 rep.th.ack_seq = htonl(ack);
842 rep.th.ack = 1;
843 rep.th.window = htons(win);
844
845#ifdef CONFIG_TCP_MD5SIG
846 if (key) {
847 int offset = (tsecr) ? 3 : 0;
848
849 rep.opt[offset++] = htonl((TCPOPT_NOP << 24) |
850 (TCPOPT_NOP << 16) |
851 (TCPOPT_MD5SIG << 8) |
852 TCPOLEN_MD5SIG);
853 arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED;
854 rep.th.doff = arg.iov[0].iov_len/4;
855
856 tcp_v4_md5_hash_hdr((__u8 *) &rep.opt[offset],
857 key, ip_hdr(skb)->saddr,
858 ip_hdr(skb)->daddr, &rep.th);
859 }
860#endif
861 arg.flags = reply_flags;
862 arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr,
863 ip_hdr(skb)->saddr, /* XXX */
864 arg.iov[0].iov_len, IPPROTO_TCP, 0);
865 arg.csumoffset = offsetof(struct tcphdr, check) / 2;
866 if (oif)
867 arg.bound_dev_if = oif;
868 arg.tos = tos;
869 arg.uid = sock_net_uid(net, sk_fullsock(sk) ? sk : NULL);
870 local_bh_disable();
871 ctl_sk = this_cpu_read(*net->ipv4.tcp_sk);
872 ctl_sk->sk_mark = (sk->sk_state == TCP_TIME_WAIT) ?
873 inet_twsk(sk)->tw_mark : sk->sk_mark;
874 ctl_sk->sk_priority = (sk->sk_state == TCP_TIME_WAIT) ?
875 inet_twsk(sk)->tw_priority : sk->sk_priority;
876 transmit_time = tcp_transmit_time(sk);
877 ip_send_unicast_reply(ctl_sk,
878 skb, &TCP_SKB_CB(skb)->header.h4.opt,
879 ip_hdr(skb)->saddr, ip_hdr(skb)->daddr,
880 &arg, arg.iov[0].iov_len,
881 transmit_time);
882
883 ctl_sk->sk_mark = 0;
884 __TCP_INC_STATS(net, TCP_MIB_OUTSEGS);
885 local_bh_enable();
886}
887
888static void tcp_v4_timewait_ack(struct sock *sk, struct sk_buff *skb)
889{
890 struct inet_timewait_sock *tw = inet_twsk(sk);
891 struct tcp_timewait_sock *tcptw = tcp_twsk(sk);
892
893 tcp_v4_send_ack(sk, skb,
894 tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
895 tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
896 tcp_time_stamp_raw() + tcptw->tw_ts_offset,
897 tcptw->tw_ts_recent,
898 tw->tw_bound_dev_if,
899 tcp_twsk_md5_key(tcptw),
900 tw->tw_transparent ? IP_REPLY_ARG_NOSRCCHECK : 0,
901 tw->tw_tos
902 );
903
904 inet_twsk_put(tw);
905}
906
907static void tcp_v4_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
908 struct request_sock *req)
909{
910 /* sk->sk_state == TCP_LISTEN -> for regular TCP_SYN_RECV
911 * sk->sk_state == TCP_SYN_RECV -> for Fast Open.
912 */
913 u32 seq = (sk->sk_state == TCP_LISTEN) ? tcp_rsk(req)->snt_isn + 1 :
914 tcp_sk(sk)->snd_nxt;
915
916 /* RFC 7323 2.3
917 * The window field (SEG.WND) of every outgoing segment, with the
918 * exception of <SYN> segments, MUST be right-shifted by
919 * Rcv.Wind.Shift bits:
920 */
921 tcp_v4_send_ack(sk, skb, seq,
922 tcp_rsk(req)->rcv_nxt,
923 req->rsk_rcv_wnd >> inet_rsk(req)->rcv_wscale,
924 tcp_time_stamp_raw() + tcp_rsk(req)->ts_off,
925 req->ts_recent,
926 0,
927 tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&ip_hdr(skb)->saddr,
928 AF_INET),
929 inet_rsk(req)->no_srccheck ? IP_REPLY_ARG_NOSRCCHECK : 0,
930 ip_hdr(skb)->tos);
931}
932
933/*
934 * Send a SYN-ACK after having received a SYN.
935 * This still operates on a request_sock only, not on a big
936 * socket.
937 */
938static int tcp_v4_send_synack(const struct sock *sk, struct dst_entry *dst,
939 struct flowi *fl,
940 struct request_sock *req,
941 struct tcp_fastopen_cookie *foc,
942 enum tcp_synack_type synack_type)
943{
944 const struct inet_request_sock *ireq = inet_rsk(req);
945 struct flowi4 fl4;
946 int err = -1;
947 struct sk_buff *skb;
948
949 /* First, grab a route. */
950 if (!dst && (dst = inet_csk_route_req(sk, &fl4, req)) == NULL)
951 return -1;
952
953 skb = tcp_make_synack(sk, dst, req, foc, synack_type);
954
955 if (skb) {
956 __tcp_v4_send_check(skb, ireq->ir_loc_addr, ireq->ir_rmt_addr);
957
958 rcu_read_lock();
959 err = ip_build_and_send_pkt(skb, sk, ireq->ir_loc_addr,
960 ireq->ir_rmt_addr,
961 rcu_dereference(ireq->ireq_opt));
962 rcu_read_unlock();
963 err = net_xmit_eval(err);
964 }
965
966 return err;
967}
968
969/*
970 * IPv4 request_sock destructor.
971 */
972static void tcp_v4_reqsk_destructor(struct request_sock *req)
973{
974 kfree(rcu_dereference_protected(inet_rsk(req)->ireq_opt, 1));
975}
976
977#ifdef CONFIG_TCP_MD5SIG
978/*
979 * RFC2385 MD5 checksumming requires a mapping of
980 * IP address->MD5 Key.
981 * We need to maintain these in the sk structure.
982 */
983
984DEFINE_STATIC_KEY_FALSE(tcp_md5_needed);
985EXPORT_SYMBOL(tcp_md5_needed);
986
987/* Find the Key structure for an address. */
988struct tcp_md5sig_key *__tcp_md5_do_lookup(const struct sock *sk,
989 const union tcp_md5_addr *addr,
990 int family)
991{
992 const struct tcp_sock *tp = tcp_sk(sk);
993 struct tcp_md5sig_key *key;
994 const struct tcp_md5sig_info *md5sig;
995 __be32 mask;
996 struct tcp_md5sig_key *best_match = NULL;
997 bool match;
998
999 /* caller either holds rcu_read_lock() or socket lock */
1000 md5sig = rcu_dereference_check(tp->md5sig_info,
1001 lockdep_sock_is_held(sk));
1002 if (!md5sig)
1003 return NULL;
1004
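	/* Scan all keys and keep the most specific match: when several
	 * keys cover the address, the longest prefix wins.
	 */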
1005 hlist_for_each_entry_rcu(key, &md5sig->head, node) {
1006 if (key->family != family)
1007 continue;
1008
1009 if (family == AF_INET) {
1010 mask = inet_make_mask(key->prefixlen);
1011 match = (key->addr.a4.s_addr & mask) ==
1012 (addr->a4.s_addr & mask);
1013#if IS_ENABLED(CONFIG_IPV6)
1014 } else if (family == AF_INET6) {
1015 match = ipv6_prefix_equal(&key->addr.a6, &addr->a6,
1016 key->prefixlen);
1017#endif
1018 } else {
1019 match = false;
1020 }
1021
1022 if (match && (!best_match ||
1023 key->prefixlen > best_match->prefixlen))
1024 best_match = key;
1025 }
1026 return best_match;
1027}
1028EXPORT_SYMBOL(__tcp_md5_do_lookup);
1029
1030static struct tcp_md5sig_key *tcp_md5_do_lookup_exact(const struct sock *sk,
1031 const union tcp_md5_addr *addr,
1032 int family, u8 prefixlen)
1033{
1034 const struct tcp_sock *tp = tcp_sk(sk);
1035 struct tcp_md5sig_key *key;
1036 unsigned int size = sizeof(struct in_addr);
1037 const struct tcp_md5sig_info *md5sig;
1038
1039 /* caller either holds rcu_read_lock() or socket lock */
1040 md5sig = rcu_dereference_check(tp->md5sig_info,
1041 lockdep_sock_is_held(sk));
1042 if (!md5sig)
1043 return NULL;
1044#if IS_ENABLED(CONFIG_IPV6)
1045 if (family == AF_INET6)
1046 size = sizeof(struct in6_addr);
1047#endif
1048 hlist_for_each_entry_rcu(key, &md5sig->head, node) {
1049 if (key->family != family)
1050 continue;
1051 if (!memcmp(&key->addr, addr, size) &&
1052 key->prefixlen == prefixlen)
1053 return key;
1054 }
1055 return NULL;
1056}
1057
1058struct tcp_md5sig_key *tcp_v4_md5_lookup(const struct sock *sk,
1059 const struct sock *addr_sk)
1060{
1061 const union tcp_md5_addr *addr;
1062
1063 addr = (const union tcp_md5_addr *)&addr_sk->sk_daddr;
1064 return tcp_md5_do_lookup(sk, addr, AF_INET);
1065}
1066EXPORT_SYMBOL(tcp_v4_md5_lookup);
1067
1068/* This can be called on a newly created socket, from other files */
1069int tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr,
1070 int family, u8 prefixlen, const u8 *newkey, u8 newkeylen,
1071 gfp_t gfp)
1072{
1073 /* Add Key to the list */
1074 struct tcp_md5sig_key *key;
1075 struct tcp_sock *tp = tcp_sk(sk);
1076 struct tcp_md5sig_info *md5sig;
1077
1078 key = tcp_md5_do_lookup_exact(sk, addr, family, prefixlen);
1079 if (key) {
1080 /* Pre-existing entry - just update that one. */
1081 memcpy(key->key, newkey, newkeylen);
1082 key->keylen = newkeylen;
1083 return 0;
1084 }
1085
1086 md5sig = rcu_dereference_protected(tp->md5sig_info,
1087 lockdep_sock_is_held(sk));
1088 if (!md5sig) {
1089 md5sig = kmalloc(sizeof(*md5sig), gfp);
1090 if (!md5sig)
1091 return -ENOMEM;
1092
1093 sk_nocaps_add(sk, NETIF_F_GSO_MASK);
1094 INIT_HLIST_HEAD(&md5sig->head);
1095 rcu_assign_pointer(tp->md5sig_info, md5sig);
1096 }
1097
1098 key = sock_kmalloc(sk, sizeof(*key), gfp);
1099 if (!key)
1100 return -ENOMEM;
1101 if (!tcp_alloc_md5sig_pool()) {
1102 sock_kfree_s(sk, key, sizeof(*key));
1103 return -ENOMEM;
1104 }
1105
1106 memcpy(key->key, newkey, newkeylen);
1107 key->keylen = newkeylen;
1108 key->family = family;
1109 key->prefixlen = prefixlen;
1110 memcpy(&key->addr, addr,
1111 (family == AF_INET6) ? sizeof(struct in6_addr) :
1112 sizeof(struct in_addr));
1113 hlist_add_head_rcu(&key->node, &md5sig->head);
1114 return 0;
1115}
1116EXPORT_SYMBOL(tcp_md5_do_add);
1117
1118int tcp_md5_do_del(struct sock *sk, const union tcp_md5_addr *addr, int family,
1119 u8 prefixlen)
1120{
1121 struct tcp_md5sig_key *key;
1122
1123 key = tcp_md5_do_lookup_exact(sk, addr, family, prefixlen);
1124 if (!key)
1125 return -ENOENT;
1126 hlist_del_rcu(&key->node);
1127 atomic_sub(sizeof(*key), &sk->sk_omem_alloc);
1128 kfree_rcu(key, rcu);
1129 return 0;
1130}
1131EXPORT_SYMBOL(tcp_md5_do_del);
1132
1133static void tcp_clear_md5_list(struct sock *sk)
1134{
1135 struct tcp_sock *tp = tcp_sk(sk);
1136 struct tcp_md5sig_key *key;
1137 struct hlist_node *n;
1138 struct tcp_md5sig_info *md5sig;
1139
1140 md5sig = rcu_dereference_protected(tp->md5sig_info, 1);
1141
1142 hlist_for_each_entry_safe(key, n, &md5sig->head, node) {
1143 hlist_del_rcu(&key->node);
1144 atomic_sub(sizeof(*key), &sk->sk_omem_alloc);
1145 kfree_rcu(key, rcu);
1146 }
1147}
1148
1149static int tcp_v4_parse_md5_keys(struct sock *sk, int optname,
1150 char __user *optval, int optlen)
1151{
1152 struct tcp_md5sig cmd;
1153 struct sockaddr_in *sin = (struct sockaddr_in *)&cmd.tcpm_addr;
1154 u8 prefixlen = 32;
1155
1156 if (optlen < sizeof(cmd))
1157 return -EINVAL;
1158
1159 if (copy_from_user(&cmd, optval, sizeof(cmd)))
1160 return -EFAULT;
1161
1162 if (sin->sin_family != AF_INET)
1163 return -EINVAL;
1164
1165 if (optname == TCP_MD5SIG_EXT &&
1166 cmd.tcpm_flags & TCP_MD5SIG_FLAG_PREFIX) {
1167 prefixlen = cmd.tcpm_prefixlen;
1168 if (prefixlen > 32)
1169 return -EINVAL;
1170 }
1171
1172 if (!cmd.tcpm_keylen)
1173 return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin->sin_addr.s_addr,
1174 AF_INET, prefixlen);
1175
1176 if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
1177 return -EINVAL;
1178
1179 return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin->sin_addr.s_addr,
1180 AF_INET, prefixlen, cmd.tcpm_key, cmd.tcpm_keylen,
1181 GFP_KERNEL);
1182}
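/* A minimal userspace sketch of how this path is exercised (illustrative
 * only; error handling omitted; 192.0.2.1 stands in for the peer address):
 *
 *	struct tcp_md5sig cmd = { 0 };
 *	struct sockaddr_in *sin = (struct sockaddr_in *)&cmd.tcpm_addr;
 *
 *	sin->sin_family = AF_INET;
 *	sin->sin_addr.s_addr = inet_addr("192.0.2.1");
 *	cmd.tcpm_keylen = 6;
 *	memcpy(cmd.tcpm_key, "secret", 6);
 *	setsockopt(fd, IPPROTO_TCP, TCP_MD5SIG, &cmd, sizeof(cmd));
 *
 * A zero tcpm_keylen deletes the key for that address instead.
 */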
1183
1184static int tcp_v4_md5_hash_headers(struct tcp_md5sig_pool *hp,
1185 __be32 daddr, __be32 saddr,
1186 const struct tcphdr *th, int nbytes)
1187{
1188 struct tcp4_pseudohdr *bp;
1189 struct scatterlist sg;
1190 struct tcphdr *_th;
1191
1192 bp = hp->scratch;
1193 bp->saddr = saddr;
1194 bp->daddr = daddr;
1195 bp->pad = 0;
1196 bp->protocol = IPPROTO_TCP;
1197 bp->len = cpu_to_be16(nbytes);
1198
1199 _th = (struct tcphdr *)(bp + 1);
1200 memcpy(_th, th, sizeof(*th));
1201 _th->check = 0;
1202
1203 sg_init_one(&sg, bp, sizeof(*bp) + sizeof(*th));
1204 ahash_request_set_crypt(hp->md5_req, &sg, NULL,
1205 sizeof(*bp) + sizeof(*th));
1206 return crypto_ahash_update(hp->md5_req);
1207}
1208
1209static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
1210 __be32 daddr, __be32 saddr, const struct tcphdr *th)
1211{
1212 struct tcp_md5sig_pool *hp;
1213 struct ahash_request *req;
1214
1215 hp = tcp_get_md5sig_pool();
1216 if (!hp)
1217 goto clear_hash_noput;
1218 req = hp->md5_req;
1219
1220 if (crypto_ahash_init(req))
1221 goto clear_hash;
1222 if (tcp_v4_md5_hash_headers(hp, daddr, saddr, th, th->doff << 2))
1223 goto clear_hash;
1224 if (tcp_md5_hash_key(hp, key))
1225 goto clear_hash;
1226 ahash_request_set_crypt(req, NULL, md5_hash, 0);
1227 if (crypto_ahash_final(req))
1228 goto clear_hash;
1229
1230 tcp_put_md5sig_pool();
1231 return 0;
1232
1233clear_hash:
1234 tcp_put_md5sig_pool();
1235clear_hash_noput:
1236 memset(md5_hash, 0, 16);
1237 return 1;
1238}
1239
1240int tcp_v4_md5_hash_skb(char *md5_hash, const struct tcp_md5sig_key *key,
1241 const struct sock *sk,
1242 const struct sk_buff *skb)
1243{
1244 struct tcp_md5sig_pool *hp;
1245 struct ahash_request *req;
1246 const struct tcphdr *th = tcp_hdr(skb);
1247 __be32 saddr, daddr;
1248
1249 if (sk) { /* valid for establish/request sockets */
1250 saddr = sk->sk_rcv_saddr;
1251 daddr = sk->sk_daddr;
1252 } else {
1253 const struct iphdr *iph = ip_hdr(skb);
1254 saddr = iph->saddr;
1255 daddr = iph->daddr;
1256 }
1257
1258 hp = tcp_get_md5sig_pool();
1259 if (!hp)
1260 goto clear_hash_noput;
1261 req = hp->md5_req;
1262
1263 if (crypto_ahash_init(req))
1264 goto clear_hash;
1265
1266 if (tcp_v4_md5_hash_headers(hp, daddr, saddr, th, skb->len))
1267 goto clear_hash;
1268 if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2))
1269 goto clear_hash;
1270 if (tcp_md5_hash_key(hp, key))
1271 goto clear_hash;
1272 ahash_request_set_crypt(req, NULL, md5_hash, 0);
1273 if (crypto_ahash_final(req))
1274 goto clear_hash;
1275
1276 tcp_put_md5sig_pool();
1277 return 0;
1278
1279clear_hash:
1280 tcp_put_md5sig_pool();
1281clear_hash_noput:
1282 memset(md5_hash, 0, 16);
1283 return 1;
1284}
1285EXPORT_SYMBOL(tcp_v4_md5_hash_skb);
1286
1287#endif
1288
1289/* Called with rcu_read_lock() */
1290static bool tcp_v4_inbound_md5_hash(const struct sock *sk,
1291 const struct sk_buff *skb)
1292{
1293#ifdef CONFIG_TCP_MD5SIG
1294 /*
1295 * This gets called for each TCP segment that arrives
1296 * so we want to be efficient.
1297 * We have 3 drop cases:
1298 * o No MD5 hash and one expected.
1299 * o MD5 hash and we're not expecting one.
1300 * o MD5 hash and it's wrong.
1301 */
1302 const __u8 *hash_location = NULL;
1303 struct tcp_md5sig_key *hash_expected;
1304 const struct iphdr *iph = ip_hdr(skb);
1305 const struct tcphdr *th = tcp_hdr(skb);
1306 int genhash;
1307 unsigned char newhash[16];
1308
1309 hash_expected = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&iph->saddr,
1310 AF_INET);
1311 hash_location = tcp_parse_md5sig_option(th);
1312
1313 /* We've parsed the options - do we have a hash? */
1314 if (!hash_expected && !hash_location)
1315 return false;
1316
1317 if (hash_expected && !hash_location) {
1318 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
1319 return true;
1320 }
1321
1322 if (!hash_expected && hash_location) {
1323 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
1324 return true;
1325 }
1326
1327 /* Okay, so this is hash_expected and hash_location -
1328 * so we need to calculate the checksum.
1329 */
1330 genhash = tcp_v4_md5_hash_skb(newhash,
1331 hash_expected,
1332 NULL, skb);
1333
1334 if (genhash || memcmp(hash_location, newhash, 16) != 0) {
1335 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5FAILURE);
1336 net_info_ratelimited("MD5 Hash failed for (%pI4, %d)->(%pI4, %d)%s\n",
1337 &iph->saddr, ntohs(th->source),
1338 &iph->daddr, ntohs(th->dest),
1339 genhash ? " tcp_v4_calc_md5_hash failed"
1340 : "");
1341 return true;
1342 }
1343 return false;
1344#endif
1345 return false;
1346}
1347
1348static void tcp_v4_init_req(struct request_sock *req,
1349 const struct sock *sk_listener,
1350 struct sk_buff *skb)
1351{
1352 struct inet_request_sock *ireq = inet_rsk(req);
1353 struct net *net = sock_net(sk_listener);
1354
1355 sk_rcv_saddr_set(req_to_sk(req), ip_hdr(skb)->daddr);
1356 sk_daddr_set(req_to_sk(req), ip_hdr(skb)->saddr);
1357 RCU_INIT_POINTER(ireq->ireq_opt, tcp_v4_save_options(net, skb));
1358}
1359
1360static struct dst_entry *tcp_v4_route_req(const struct sock *sk,
1361 struct flowi *fl,
1362 const struct request_sock *req)
1363{
1364 return inet_csk_route_req(sk, &fl->u.ip4, req);
1365}
1366
1367struct request_sock_ops tcp_request_sock_ops __read_mostly = {
1368 .family = PF_INET,
1369 .obj_size = sizeof(struct tcp_request_sock),
1370 .rtx_syn_ack = tcp_rtx_synack,
1371 .send_ack = tcp_v4_reqsk_send_ack,
1372 .destructor = tcp_v4_reqsk_destructor,
1373 .send_reset = tcp_v4_send_reset,
1374 .syn_ack_timeout = tcp_syn_ack_timeout,
1375};
1376
1377static const struct tcp_request_sock_ops tcp_request_sock_ipv4_ops = {
1378 .mss_clamp = TCP_MSS_DEFAULT,
1379#ifdef CONFIG_TCP_MD5SIG
1380 .req_md5_lookup = tcp_v4_md5_lookup,
1381 .calc_md5_hash = tcp_v4_md5_hash_skb,
1382#endif
1383 .init_req = tcp_v4_init_req,
1384#ifdef CONFIG_SYN_COOKIES
1385 .cookie_init_seq = cookie_v4_init_sequence,
1386#endif
1387 .route_req = tcp_v4_route_req,
1388 .init_seq = tcp_v4_init_seq,
1389 .init_ts_off = tcp_v4_init_ts_off,
1390 .send_synack = tcp_v4_send_synack,
1391};
1392
1393int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
1394{
1395 /* Never answer SYNs sent to broadcast or multicast addresses */
1396 if (skb_rtable(skb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))
1397 goto drop;
1398
1399 return tcp_conn_request(&tcp_request_sock_ops,
1400 &tcp_request_sock_ipv4_ops, sk, skb);
1401
1402drop:
1403 tcp_listendrop(sk);
1404 return 0;
1405}
1406EXPORT_SYMBOL(tcp_v4_conn_request);
1407
1408
1409/*
1410 * The three-way handshake has completed - we got a valid ACK -
1411 * now create the new socket.
1412 */
1413struct sock *tcp_v4_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
1414 struct request_sock *req,
1415 struct dst_entry *dst,
1416 struct request_sock *req_unhash,
1417 bool *own_req)
1418{
1419 struct inet_request_sock *ireq;
1420 struct inet_sock *newinet;
1421 struct tcp_sock *newtp;
1422 struct sock *newsk;
1423#ifdef CONFIG_TCP_MD5SIG
1424 struct tcp_md5sig_key *key;
1425#endif
1426 struct ip_options_rcu *inet_opt;
1427
1428 if (sk_acceptq_is_full(sk))
1429 goto exit_overflow;
1430
1431 newsk = tcp_create_openreq_child(sk, req, skb);
1432 if (!newsk)
1433 goto exit_nonewsk;
1434
1435 newsk->sk_gso_type = SKB_GSO_TCPV4;
1436 inet_sk_rx_dst_set(newsk, skb);
1437
1438 newtp = tcp_sk(newsk);
1439 newinet = inet_sk(newsk);
1440 ireq = inet_rsk(req);
1441 sk_daddr_set(newsk, ireq->ir_rmt_addr);
1442 sk_rcv_saddr_set(newsk, ireq->ir_loc_addr);
1443 newsk->sk_bound_dev_if = ireq->ir_iif;
1444 newinet->inet_saddr = ireq->ir_loc_addr;
1445 inet_opt = rcu_dereference(ireq->ireq_opt);
1446 RCU_INIT_POINTER(newinet->inet_opt, inet_opt);
1447 newinet->mc_index = inet_iif(skb);
1448 newinet->mc_ttl = ip_hdr(skb)->ttl;
1449 newinet->rcv_tos = ip_hdr(skb)->tos;
1450 inet_csk(newsk)->icsk_ext_hdr_len = 0;
1451 if (inet_opt)
1452 inet_csk(newsk)->icsk_ext_hdr_len = inet_opt->opt.optlen;
1453 newinet->inet_id = prandom_u32();
1454
1455 if (!dst) {
1456 dst = inet_csk_route_child_sock(sk, newsk, req);
1457 if (!dst)
1458 goto put_and_exit;
1459 } else {
1460 /* syncookie case : see end of cookie_v4_check() */
1461 }
1462 sk_setup_caps(newsk, dst);
1463
1464 tcp_ca_openreq_child(newsk, dst);
1465
1466 tcp_sync_mss(newsk, dst_mtu(dst));
1467 newtp->advmss = tcp_mss_clamp(tcp_sk(sk), dst_metric_advmss(dst));
1468
1469 tcp_initialize_rcv_mss(newsk);
1470
1471#ifdef CONFIG_TCP_MD5SIG
1472 /* Copy over the MD5 key from the original socket */
1473 key = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&newinet->inet_daddr,
1474 AF_INET);
1475 if (key) {
1476 /*
1477 * We're using one, so create a matching key
1478 * on the newsk structure. If we fail to get
1479 * memory, then we end up not copying the key
1480 * across. Shucks.
1481 */
1482 tcp_md5_do_add(newsk, (union tcp_md5_addr *)&newinet->inet_daddr,
1483 AF_INET, 32, key->key, key->keylen, GFP_ATOMIC);
1484 sk_nocaps_add(newsk, NETIF_F_GSO_MASK);
1485 }
1486#endif
1487
1488 if (__inet_inherit_port(sk, newsk) < 0)
1489 goto put_and_exit;
1490 *own_req = inet_ehash_nolisten(newsk, req_to_sk(req_unhash));
1491 if (likely(*own_req)) {
1492 tcp_move_syn(newtp, req);
1493 ireq->ireq_opt = NULL;
1494 } else {
1495 newinet->inet_opt = NULL;
1496 }
1497 return newsk;
1498
1499exit_overflow:
1500 NET_INC_STATS(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
1501exit_nonewsk:
1502 dst_release(dst);
1503exit:
1504 tcp_listendrop(sk);
1505 return NULL;
1506put_and_exit:
1507 newinet->inet_opt = NULL;
1508 inet_csk_prepare_forced_close(newsk);
1509 tcp_done(newsk);
1510 goto exit;
1511}
1512EXPORT_SYMBOL(tcp_v4_syn_recv_sock);
1513
1514static struct sock *tcp_v4_cookie_check(struct sock *sk, struct sk_buff *skb)
1515{
1516#ifdef CONFIG_SYN_COOKIES
1517 const struct tcphdr *th = tcp_hdr(skb);
1518
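	/* Syncookies are validated on the bare ACK completing the handshake
	 * (the SYN itself was answered statelessly), hence the !th->syn test.
	 */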
1519 if (!th->syn)
1520 sk = cookie_v4_check(sk, skb);
1521#endif
1522 return sk;
1523}
1524
1525u16 tcp_v4_get_syncookie(struct sock *sk, struct iphdr *iph,
1526 struct tcphdr *th, u32 *cookie)
1527{
1528 u16 mss = 0;
1529#ifdef CONFIG_SYN_COOKIES
1530 mss = tcp_get_syncookie_mss(&tcp_request_sock_ops,
1531 &tcp_request_sock_ipv4_ops, sk, th);
1532 if (mss) {
1533 *cookie = __cookie_v4_init_sequence(iph, th, &mss);
1534 tcp_synq_overflow(sk);
1535 }
1536#endif
1537 return mss;
1538}
1539
1540/* The socket must have its spinlock held when we get
1541 * here, unless it is a TCP_LISTEN socket.
1542 *
1543 * We have a potential double-lock case here, so even when
1544 * doing backlog processing we use the BH locking scheme.
1545 * This is because we cannot sleep with the original spinlock
1546 * held.
1547 */
1548int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
1549{
1550 struct sock *rsk;
1551
1552 if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
1553 struct dst_entry *dst = sk->sk_rx_dst;
1554
1555 sock_rps_save_rxhash(sk, skb);
1556 sk_mark_napi_id(sk, skb);
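		/* Validate the cached input route: drop it if this packet
		 * arrived on a different interface or the dst is stale.
		 */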
1557 if (dst) {
1558 if (inet_sk(sk)->rx_dst_ifindex != skb->skb_iif ||
1559 !dst->ops->check(dst, 0)) {
1560 dst_release(dst);
1561 sk->sk_rx_dst = NULL;
1562 }
1563 }
1564 tcp_rcv_established(sk, skb);
1565 return 0;
1566 }
1567
1568 if (tcp_checksum_complete(skb))
1569 goto csum_err;
1570
1571 if (sk->sk_state == TCP_LISTEN) {
1572 struct sock *nsk = tcp_v4_cookie_check(sk, skb);
1573
1574 if (!nsk)
1575 goto discard;
1576 if (nsk != sk) {
1577 if (tcp_child_process(sk, nsk, skb)) {
1578 rsk = nsk;
1579 goto reset;
1580 }
1581 return 0;
1582 }
1583 } else
1584 sock_rps_save_rxhash(sk, skb);
1585
1586 if (tcp_rcv_state_process(sk, skb)) {
1587 rsk = sk;
1588 goto reset;
1589 }
1590 return 0;
1591
1592reset:
1593 tcp_v4_send_reset(rsk, skb);
1594discard:
1595 kfree_skb(skb);
1596 /* Be careful here. If this function gets more complicated and
1597 * gcc suffers from register pressure on the x86, sk (in %ebx)
1598 * might be destroyed here. This current version compiles correctly,
1599 * but you have been warned.
1600 */
1601 return 0;
1602
1603csum_err:
1604 TCP_INC_STATS(sock_net(sk), TCP_MIB_CSUMERRORS);
1605 TCP_INC_STATS(sock_net(sk), TCP_MIB_INERRS);
1606 goto discard;
1607}
1608EXPORT_SYMBOL(tcp_v4_do_rcv);
1609
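/* Early demux: look up an established socket for this 4-tuple at IP receive
 * time, so the socket and its cached input route can be attached to the skb
 * before the full TCP receive path runs.
 */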
1610int tcp_v4_early_demux(struct sk_buff *skb)
1611{
1612 const struct iphdr *iph;
1613 const struct tcphdr *th;
1614 struct sock *sk;
1615
1616 if (skb->pkt_type != PACKET_HOST)
1617 return 0;
1618
1619 if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct tcphdr)))
1620 return 0;
1621
1622 iph = ip_hdr(skb);
1623 th = tcp_hdr(skb);
1624
1625 if (th->doff < sizeof(struct tcphdr) / 4)
1626 return 0;
1627
1628 sk = __inet_lookup_established(dev_net(skb->dev), &tcp_hashinfo,
1629 iph->saddr, th->source,
1630 iph->daddr, ntohs(th->dest),
1631 skb->skb_iif, inet_sdif(skb));
1632 if (sk) {
1633 skb->sk = sk;
1634 skb->destructor = sock_edemux;
1635 if (sk_fullsock(sk)) {
1636 struct dst_entry *dst = READ_ONCE(sk->sk_rx_dst);
1637
1638 if (dst)
1639 dst = dst_check(dst, 0);
1640 if (dst &&
1641 inet_sk(sk)->rx_dst_ifindex == skb->skb_iif)
1642 skb_dst_set_noref(skb, dst);
1643 }
1644 }
1645 return 0;
1646}
1647
1648bool tcp_add_backlog(struct sock *sk, struct sk_buff *skb)
1649{
1650 u32 limit = READ_ONCE(sk->sk_rcvbuf) + READ_ONCE(sk->sk_sndbuf);
1651 struct skb_shared_info *shinfo;
1652 const struct tcphdr *th;
1653 struct tcphdr *thtail;
1654 struct sk_buff *tail;
1655 unsigned int hdrlen;
1656 bool fragstolen;
1657 u32 gso_segs;
1658 int delta;
1659
1660 /* In case all data was pulled from skb frags (in __pskb_pull_tail()),
1661 * we can fix skb->truesize to its real value to avoid future drops.
1662 * This is valid because skb is not yet charged to the socket.
1663 * It has been noticed that pure SACK packets were sometimes dropped
1664 * (if cooked by drivers without the copybreak feature).
1665 */
1666 skb_condense(skb);
1667
1668 skb_dst_drop(skb);
1669
1670 if (unlikely(tcp_checksum_complete(skb))) {
1671 bh_unlock_sock(sk);
1672 __TCP_INC_STATS(sock_net(sk), TCP_MIB_CSUMERRORS);
1673 __TCP_INC_STATS(sock_net(sk), TCP_MIB_INERRS);
1674 return true;
1675 }
1676
1677 /* Attempt coalescing to last skb in backlog, even if we are
1678 * above the limits.
1679 * This is okay because skb capacity is limited to MAX_SKB_FRAGS.
1680 */
1681 th = (const struct tcphdr *)skb->data;
1682 hdrlen = th->doff * 4;
1683 shinfo = skb_shinfo(skb);
1684
1685 if (!shinfo->gso_size)
1686 shinfo->gso_size = skb->len - hdrlen;
1687
1688 if (!shinfo->gso_segs)
1689 shinfo->gso_segs = 1;
1690
1691 tail = sk->sk_backlog.tail;
1692 if (!tail)
1693 goto no_coalesce;
1694 thtail = (struct tcphdr *)tail->data;
1695
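	/* Coalesce only if the new segment directly continues the tail one
	 * and both could have come from a single GRO super-packet: same
	 * DSCP/ECN field, no SYN/RST/URG, ACK set on both, matching ECE/CWR,
	 * and identical header length and TCP options.
	 */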
1696 if (TCP_SKB_CB(tail)->end_seq != TCP_SKB_CB(skb)->seq ||
1697 TCP_SKB_CB(tail)->ip_dsfield != TCP_SKB_CB(skb)->ip_dsfield ||
1698 ((TCP_SKB_CB(tail)->tcp_flags |
1699 TCP_SKB_CB(skb)->tcp_flags) & (TCPHDR_SYN | TCPHDR_RST | TCPHDR_URG)) ||
1700 !((TCP_SKB_CB(tail)->tcp_flags &
1701 TCP_SKB_CB(skb)->tcp_flags) & TCPHDR_ACK) ||
1702 ((TCP_SKB_CB(tail)->tcp_flags ^
1703 TCP_SKB_CB(skb)->tcp_flags) & (TCPHDR_ECE | TCPHDR_CWR)) ||
1704#ifdef CONFIG_TLS_DEVICE
1705 tail->decrypted != skb->decrypted ||
1706#endif
1707 thtail->doff != th->doff ||
1708 memcmp(thtail + 1, th + 1, hdrlen - sizeof(*th)))
1709 goto no_coalesce;
1710
1711 __skb_pull(skb, hdrlen);
1712 if (skb_try_coalesce(tail, skb, &fragstolen, &delta)) {
1713 thtail->window = th->window;
1714
1715 TCP_SKB_CB(tail)->end_seq = TCP_SKB_CB(skb)->end_seq;
1716
1717 if (after(TCP_SKB_CB(skb)->ack_seq, TCP_SKB_CB(tail)->ack_seq))
1718 TCP_SKB_CB(tail)->ack_seq = TCP_SKB_CB(skb)->ack_seq;
1719
1720 /* We have to update both TCP_SKB_CB(tail)->tcp_flags and
1721 * thtail->fin, so that the fast path in tcp_rcv_established()
1722 * is not entered if we append a packet with a FIN.
1723 * SYN, RST, URG are not present.
1724 * ACK is set on both packets.
1725 * PSH : we do not really care in TCP stack,
1726 * at least for 'GRO' packets.
1727 */
1728 thtail->fin |= th->fin;
1729 TCP_SKB_CB(tail)->tcp_flags |= TCP_SKB_CB(skb)->tcp_flags;
1730
1731 if (TCP_SKB_CB(skb)->has_rxtstamp) {
1732 TCP_SKB_CB(tail)->has_rxtstamp = true;
1733 tail->tstamp = skb->tstamp;
1734 skb_hwtstamps(tail)->hwtstamp = skb_hwtstamps(skb)->hwtstamp;
1735 }
1736
1737 /* Not as strict as GRO. We only need to carry mss max value */
1738 skb_shinfo(tail)->gso_size = max(shinfo->gso_size,
1739 skb_shinfo(tail)->gso_size);
1740
1741 gso_segs = skb_shinfo(tail)->gso_segs + shinfo->gso_segs;
1742 skb_shinfo(tail)->gso_segs = min_t(u32, gso_segs, 0xFFFF);
1743
1744 sk->sk_backlog.len += delta;
1745 __NET_INC_STATS(sock_net(sk),
1746 LINUX_MIB_TCPBACKLOGCOALESCE);
1747 kfree_skb_partial(skb, fragstolen);
1748 return false;
1749 }
1750 __skb_push(skb, hdrlen);
1751
1752no_coalesce:
1753 /* Only the socket owner can try to collapse/prune rx queues
1754 * to reduce memory overhead, so add a little headroom here.
1755 * Few socket backlogs are likely to be concurrently non-empty.
1756 */
1757 limit += 64*1024;
1758
1759 if (unlikely(sk_add_backlog(sk, skb, limit))) {
1760 bh_unlock_sock(sk);
1761 __NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPBACKLOGDROP);
1762 return true;
1763 }
1764 return false;
1765}
1766EXPORT_SYMBOL(tcp_add_backlog);
1767
1768int tcp_filter(struct sock *sk, struct sk_buff *skb)
1769{
1770 struct tcphdr *th = (struct tcphdr *)skb->data;
1771
1772 return sk_filter_trim_cap(sk, skb, th->doff * 4);
1773}
1774EXPORT_SYMBOL(tcp_filter);
1775
1776static void tcp_v4_restore_cb(struct sk_buff *skb)
1777{
1778 memmove(IPCB(skb), &TCP_SKB_CB(skb)->header.h4,
1779 sizeof(struct inet_skb_parm));
1780}
1781
1782static void tcp_v4_fill_cb(struct sk_buff *skb, const struct iphdr *iph,
1783 const struct tcphdr *th)
1784{
1785 /* This is tricky: we move IPCB to its correct location inside TCP_SKB_CB().
1786 * barrier() makes sure the compiler won't play fool^Waliasing games.
1787 */
1788 memmove(&TCP_SKB_CB(skb)->header.h4, IPCB(skb),
1789 sizeof(struct inet_skb_parm));
1790 barrier();
1791
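	/* end_seq covers the payload plus one sequence number each for SYN
	 * and FIN, since those flags consume sequence space.
	 */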
1792 TCP_SKB_CB(skb)->seq = ntohl(th->seq);
1793 TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
1794 skb->len - th->doff * 4);
1795 TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
1796 TCP_SKB_CB(skb)->tcp_flags = tcp_flag_byte(th);
1797 TCP_SKB_CB(skb)->tcp_tw_isn = 0;
1798 TCP_SKB_CB(skb)->ip_dsfield = ipv4_get_dsfield(iph);
1799 TCP_SKB_CB(skb)->sacked = 0;
1800 TCP_SKB_CB(skb)->has_rxtstamp =
1801 skb->tstamp || skb_hwtstamps(skb)->hwtstamp;
1802}
1803
1804/*
1805 * From tcp_input.c
1806 */
1807
1808int tcp_v4_rcv(struct sk_buff *skb)
1809{
1810 struct net *net = dev_net(skb->dev);
1811 struct sk_buff *skb_to_free;
1812 int sdif = inet_sdif(skb);
1813 const struct iphdr *iph;
1814 const struct tcphdr *th;
1815 bool refcounted;
1816 struct sock *sk;
1817 int ret;
1818
1819 if (skb->pkt_type != PACKET_HOST)
1820 goto discard_it;
1821
1822 /* Count it even if it's bad */
1823 __TCP_INC_STATS(net, TCP_MIB_INSEGS);
1824
1825 if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
1826 goto discard_it;
1827
1828 th = (const struct tcphdr *)skb->data;
1829
1830 if (unlikely(th->doff < sizeof(struct tcphdr) / 4))
1831 goto bad_packet;
1832 if (!pskb_may_pull(skb, th->doff * 4))
1833 goto discard_it;
1834
1835 /* An explanation is required here, I think.
1836 * Packet length and doff are validated by header prediction,
1837 * provided the case of th->doff==0 is eliminated.
1838 * So, we defer the checks. */
1839
1840 if (skb_checksum_init(skb, IPPROTO_TCP, inet_compute_pseudo))
1841 goto csum_error;
1842
1843 th = (const struct tcphdr *)skb->data;
1844 iph = ip_hdr(skb);
1845lookup:
1846 sk = __inet_lookup_skb(&tcp_hashinfo, skb, __tcp_hdrlen(th), th->source,
1847 th->dest, sdif, &refcounted);
1848 if (!sk)
1849 goto no_tcp_socket;
1850
1851process:
1852 if (sk->sk_state == TCP_TIME_WAIT)
1853 goto do_time_wait;
1854
1855 if (sk->sk_state == TCP_NEW_SYN_RECV) {
1856 struct request_sock *req = inet_reqsk(sk);
1857 bool req_stolen = false;
1858 struct sock *nsk;
1859
1860 sk = req->rsk_listener;
1861 if (unlikely(tcp_v4_inbound_md5_hash(sk, skb))) {
1862 sk_drops_add(sk, skb);
1863 reqsk_put(req);
1864 goto discard_it;
1865 }
1866 if (tcp_checksum_complete(skb)) {
1867 reqsk_put(req);
1868 goto csum_error;
1869 }
1870 if (unlikely(sk->sk_state != TCP_LISTEN)) {
1871 inet_csk_reqsk_queue_drop_and_put(sk, req);
1872 goto lookup;
1873 }
1874 /* We own a reference on the listener, increase it again
1875 * as we might lose it too soon.
1876 */
1877 sock_hold(sk);
1878 refcounted = true;
1879 nsk = NULL;
1880 if (!tcp_filter(sk, skb)) {
1881 th = (const struct tcphdr *)skb->data;
1882 iph = ip_hdr(skb);
1883 tcp_v4_fill_cb(skb, iph, th);
1884 nsk = tcp_check_req(sk, skb, req, false, &req_stolen);
1885 }
1886 if (!nsk) {
1887 reqsk_put(req);
1888 if (req_stolen) {
1889 /* Another cpu got exclusive access to req
1890 * and created a full blown socket.
1891 * Try to feed this packet to this socket
1892 * instead of discarding it.
1893 */
1894 tcp_v4_restore_cb(skb);
1895 sock_put(sk);
1896 goto lookup;
1897 }
1898 goto discard_and_relse;
1899 }
1900 if (nsk == sk) {
1901 reqsk_put(req);
1902 tcp_v4_restore_cb(skb);
1903 } else if (tcp_child_process(sk, nsk, skb)) {
1904 tcp_v4_send_reset(nsk, skb);
1905 goto discard_and_relse;
1906 } else {
1907 sock_put(sk);
1908 return 0;
1909 }
1910 }
1911 if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
1912 __NET_INC_STATS(net, LINUX_MIB_TCPMINTTLDROP);
1913 goto discard_and_relse;
1914 }
1915
1916 if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
1917 goto discard_and_relse;
1918
1919 if (tcp_v4_inbound_md5_hash(sk, skb))
1920 goto discard_and_relse;
1921
1922 nf_reset_ct(skb);
1923
1924 if (tcp_filter(sk, skb))
1925 goto discard_and_relse;
1926 th = (const struct tcphdr *)skb->data;
1927 iph = ip_hdr(skb);
1928 tcp_v4_fill_cb(skb, iph, th);
1929
1930 skb->dev = NULL;
1931
1932 if (sk->sk_state == TCP_LISTEN) {
1933 ret = tcp_v4_do_rcv(sk, skb);
1934 goto put_and_return;
1935 }
1936
1937 sk_incoming_cpu_update(sk);
1938
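	/* If the socket lock is free, process the segment right away;
	 * otherwise queue it on the backlog so the lock owner handles it
	 * at release_sock() time.  Any skb parked in sk_rx_skb_cache is
	 * freed after the lock is dropped.
	 */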
1939 bh_lock_sock_nested(sk);
1940 tcp_segs_in(tcp_sk(sk), skb);
1941 ret = 0;
1942 if (!sock_owned_by_user(sk)) {
1943 skb_to_free = sk->sk_rx_skb_cache;
1944 sk->sk_rx_skb_cache = NULL;
1945 ret = tcp_v4_do_rcv(sk, skb);
1946 } else {
1947 if (tcp_add_backlog(sk, skb))
1948 goto discard_and_relse;
1949 skb_to_free = NULL;
1950 }
1951 bh_unlock_sock(sk);
1952 if (skb_to_free)
1953 __kfree_skb(skb_to_free);
1954
1955put_and_return:
1956 if (refcounted)
1957 sock_put(sk);
1958
1959 return ret;
1960
1961no_tcp_socket:
1962 if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
1963 goto discard_it;
1964
1965 tcp_v4_fill_cb(skb, iph, th);
1966
1967 if (tcp_checksum_complete(skb)) {
1968csum_error:
1969 __TCP_INC_STATS(net, TCP_MIB_CSUMERRORS);
1970bad_packet:
1971 __TCP_INC_STATS(net, TCP_MIB_INERRS);
1972 } else {
1973 tcp_v4_send_reset(NULL, skb);
1974 }
1975
1976discard_it:
1977 /* Discard frame. */
1978 kfree_skb(skb);
1979 return 0;
1980
1981discard_and_relse:
1982 sk_drops_add(sk, skb);
1983 if (refcounted)
1984 sock_put(sk);
1985 goto discard_it;
1986
1987do_time_wait:
1988 if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) {
1989 inet_twsk_put(inet_twsk(sk));
1990 goto discard_it;
1991 }
1992
1993 tcp_v4_fill_cb(skb, iph, th);
1994
1995 if (tcp_checksum_complete(skb)) {
1996 inet_twsk_put(inet_twsk(sk));
1997 goto csum_error;
1998 }
1999 switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
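	/* A new SYN aimed at a TIME_WAIT connection may be acceptable; if a
	 * matching listener exists, drop the timewait socket and restart
	 * processing on the listener.
	 */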
2000 case TCP_TW_SYN: {
2001 struct sock *sk2 = inet_lookup_listener(dev_net(skb->dev),
2002 &tcp_hashinfo, skb,
2003 __tcp_hdrlen(th),
2004 iph->saddr, th->source,
2005 iph->daddr, th->dest,
2006 inet_iif(skb),
2007 sdif);
2008 if (sk2) {
2009 inet_twsk_deschedule_put(inet_twsk(sk));
2010 sk = sk2;
2011 tcp_v4_restore_cb(skb);
2012 refcounted = false;
2013 goto process;
2014 }
2015 }
2016 /* to ACK */
2017 /* fall through */
2018 case TCP_TW_ACK:
2019 tcp_v4_timewait_ack(sk, skb);
2020 break;
2021 case TCP_TW_RST:
2022 tcp_v4_send_reset(sk, skb);
2023 inet_twsk_deschedule_put(inet_twsk(sk));
2024 goto discard_it;
2025 case TCP_TW_SUCCESS:;
2026 }
2027 goto discard_it;
2028}
2029
2030static struct timewait_sock_ops tcp_timewait_sock_ops = {
2031 .twsk_obj_size = sizeof(struct tcp_timewait_sock),
2032 .twsk_unique = tcp_twsk_unique,
2033 .twsk_destructor= tcp_twsk_destructor,
2034};
2035
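/* Cache the dst of a received packet on the socket, together with the
 * incoming interface, so early demux can reuse it for later segments
 * without a fresh route lookup.
 */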
2036void inet_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
2037{
2038 struct dst_entry *dst = skb_dst(skb);
2039
2040 if (dst && dst_hold_safe(dst)) {
2041 sk->sk_rx_dst = dst;
2042 inet_sk(sk)->rx_dst_ifindex = skb->skb_iif;
2043 }
2044}
2045EXPORT_SYMBOL(inet_sk_rx_dst_set);
2046
2047const struct inet_connection_sock_af_ops ipv4_specific = {
2048 .queue_xmit = ip_queue_xmit,
2049 .send_check = tcp_v4_send_check,
2050 .rebuild_header = inet_sk_rebuild_header,
2051 .sk_rx_dst_set = inet_sk_rx_dst_set,
2052 .conn_request = tcp_v4_conn_request,
2053 .syn_recv_sock = tcp_v4_syn_recv_sock,
2054 .net_header_len = sizeof(struct iphdr),
2055 .setsockopt = ip_setsockopt,
2056 .getsockopt = ip_getsockopt,
2057 .addr2sockaddr = inet_csk_addr2sockaddr,
2058 .sockaddr_len = sizeof(struct sockaddr_in),
2059#ifdef CONFIG_COMPAT
2060 .compat_setsockopt = compat_ip_setsockopt,
2061 .compat_getsockopt = compat_ip_getsockopt,
2062#endif
2063 .mtu_reduced = tcp_v4_mtu_reduced,
2064};
2065EXPORT_SYMBOL(ipv4_specific);
2066
2067#ifdef CONFIG_TCP_MD5SIG
2068static const struct tcp_sock_af_ops tcp_sock_ipv4_specific = {
2069 .md5_lookup = tcp_v4_md5_lookup,
2070 .calc_md5_hash = tcp_v4_md5_hash_skb,
2071 .md5_parse = tcp_v4_parse_md5_keys,
2072};
2073#endif
2074
2075/* NOTE: A lot of things are set to zero explicitly by the call to
2076 * sk_alloc(), so they need not be done here.
2077 */
2078static int tcp_v4_init_sock(struct sock *sk)
2079{
2080 struct inet_connection_sock *icsk = inet_csk(sk);
2081
2082 tcp_init_sock(sk);
2083
2084 icsk->icsk_af_ops = &ipv4_specific;
2085
2086#ifdef CONFIG_TCP_MD5SIG
2087 tcp_sk(sk)->af_specific = &tcp_sock_ipv4_specific;
2088#endif
2089
2090 return 0;
2091}
2092
2093void tcp_v4_destroy_sock(struct sock *sk)
2094{
2095 struct tcp_sock *tp = tcp_sk(sk);
2096
2097 trace_tcp_destroy_sock(sk);
2098
2099 tcp_clear_xmit_timers(sk);
2100
2101 tcp_cleanup_congestion_control(sk);
2102
2103 tcp_cleanup_ulp(sk);
2104
2105	/* Clean up the write buffer. */
2106 tcp_write_queue_purge(sk);
2107
2108 /* Check if we want to disable active TFO */
2109 tcp_fastopen_active_disable_ofo_check(sk);
2110
2111 /* Cleans up our, hopefully empty, out_of_order_queue. */
2112 skb_rbtree_purge(&tp->out_of_order_queue);
2113
2114#ifdef CONFIG_TCP_MD5SIG
2115 /* Clean up the MD5 key list, if any */
2116 if (tp->md5sig_info) {
2117 tcp_clear_md5_list(sk);
2118 kfree_rcu(rcu_dereference_protected(tp->md5sig_info, 1), rcu);
2119 tp->md5sig_info = NULL;
2120 }
2121#endif
2122
2123 /* Clean up a referenced TCP bind bucket. */
2124 if (inet_csk(sk)->icsk_bind_hash)
2125 inet_put_port(sk);
2126
2127 BUG_ON(rcu_access_pointer(tp->fastopen_rsk));
2128
2129	/* If the socket was aborted during the connect operation. */
2130 tcp_free_fastopen_req(tp);
2131 tcp_fastopen_destroy_cipher(sk);
2132 tcp_saved_syn_free(tp);
2133
2134 sk_sockets_allocated_dec(sk);
2135}
2136EXPORT_SYMBOL(tcp_v4_destroy_sock);
2137
2138#ifdef CONFIG_PROC_FS
2139/* Proc filesystem TCP sock list dumping. */
2140
2141/*
2142 * Get the next listening socket following cur. If cur is NULL, get the
2143 * first socket starting from the bucket given in st->bucket; when
2144 * st->bucket is zero, the very first socket in the hash table is returned.
2145 */
2146static void *listening_get_next(struct seq_file *seq, void *cur)
2147{
2148 struct tcp_seq_afinfo *afinfo = PDE_DATA(file_inode(seq->file));
2149 struct tcp_iter_state *st = seq->private;
2150 struct net *net = seq_file_net(seq);
2151 struct inet_listen_hashbucket *ilb;
2152 struct sock *sk = cur;
2153
2154 if (!sk) {
2155get_head:
2156 ilb = &tcp_hashinfo.listening_hash[st->bucket];
2157 spin_lock(&ilb->lock);
2158 sk = sk_head(&ilb->head);
2159 st->offset = 0;
2160 goto get_sk;
2161 }
2162 ilb = &tcp_hashinfo.listening_hash[st->bucket];
2163 ++st->num;
2164 ++st->offset;
2165
2166 sk = sk_next(sk);
2167get_sk:
2168 sk_for_each_from(sk) {
2169 if (!net_eq(sock_net(sk), net))
2170 continue;
2171 if (sk->sk_family == afinfo->family)
2172 return sk;
2173 }
2174 spin_unlock(&ilb->lock);
2175 st->offset = 0;
2176 if (++st->bucket < INET_LHTABLE_SIZE)
2177 goto get_head;
2178 return NULL;
2179}
2180
2181static void *listening_get_idx(struct seq_file *seq, loff_t *pos)
2182{
2183 struct tcp_iter_state *st = seq->private;
2184 void *rc;
2185
2186 st->bucket = 0;
2187 st->offset = 0;
2188 rc = listening_get_next(seq, NULL);
2189
2190 while (rc && *pos) {
2191 rc = listening_get_next(seq, rc);
2192 --*pos;
2193 }
2194 return rc;
2195}
2196
2197static inline bool empty_bucket(const struct tcp_iter_state *st)
2198{
2199 return hlist_nulls_empty(&tcp_hashinfo.ehash[st->bucket].chain);
2200}
2201
2202/*
2203 * Get the first established socket, starting from the bucket given in st->bucket.
2204 * If st->bucket is zero, the very first socket in the hash is returned.
2205 */
2206static void *established_get_first(struct seq_file *seq)
2207{
2208 struct tcp_seq_afinfo *afinfo = PDE_DATA(file_inode(seq->file));
2209 struct tcp_iter_state *st = seq->private;
2210 struct net *net = seq_file_net(seq);
2211 void *rc = NULL;
2212
2213 st->offset = 0;
2214 for (; st->bucket <= tcp_hashinfo.ehash_mask; ++st->bucket) {
2215 struct sock *sk;
2216 struct hlist_nulls_node *node;
2217 spinlock_t *lock = inet_ehash_lockp(&tcp_hashinfo, st->bucket);
2218
2219 /* Lockless fast path for the common case of empty buckets */
2220 if (empty_bucket(st))
2221 continue;
2222
2223 spin_lock_bh(lock);
2224 sk_nulls_for_each(sk, node, &tcp_hashinfo.ehash[st->bucket].chain) {
2225 if (sk->sk_family != afinfo->family ||
2226 !net_eq(sock_net(sk), net)) {
2227 continue;
2228 }
2229 rc = sk;
2230 goto out;
2231 }
2232 spin_unlock_bh(lock);
2233 }
2234out:
2235 return rc;
2236}
2237
2238static void *established_get_next(struct seq_file *seq, void *cur)
2239{
2240 struct tcp_seq_afinfo *afinfo = PDE_DATA(file_inode(seq->file));
2241 struct sock *sk = cur;
2242 struct hlist_nulls_node *node;
2243 struct tcp_iter_state *st = seq->private;
2244 struct net *net = seq_file_net(seq);
2245
2246 ++st->num;
2247 ++st->offset;
2248
2249 sk = sk_nulls_next(sk);
2250
2251 sk_nulls_for_each_from(sk, node) {
2252 if (sk->sk_family == afinfo->family &&
2253 net_eq(sock_net(sk), net))
2254 return sk;
2255 }
2256
2257 spin_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
2258 ++st->bucket;
2259 return established_get_first(seq);
2260}
2261
2262static void *established_get_idx(struct seq_file *seq, loff_t pos)
2263{
2264 struct tcp_iter_state *st = seq->private;
2265 void *rc;
2266
2267 st->bucket = 0;
2268 rc = established_get_first(seq);
2269
2270 while (rc && pos) {
2271 rc = established_get_next(seq, rc);
2272 --pos;
2273 }
2274 return rc;
2275}
2276
2277static void *tcp_get_idx(struct seq_file *seq, loff_t pos)
2278{
2279 void *rc;
2280 struct tcp_iter_state *st = seq->private;
2281
2282 st->state = TCP_SEQ_STATE_LISTENING;
2283 rc = listening_get_idx(seq, &pos);
2284
2285 if (!rc) {
2286 st->state = TCP_SEQ_STATE_ESTABLISHED;
2287 rc = established_get_idx(seq, pos);
2288 }
2289
2290 return rc;
2291}
2292
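/* Resume the walk from the bucket/offset saved in the iterator state, so
 * that a re-read at the same *pos does not rescan the hash tables from
 * the beginning.
 */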
2293static void *tcp_seek_last_pos(struct seq_file *seq)
2294{
2295 struct tcp_iter_state *st = seq->private;
2296 int offset = st->offset;
2297 int orig_num = st->num;
2298 void *rc = NULL;
2299
2300 switch (st->state) {
2301 case TCP_SEQ_STATE_LISTENING:
2302 if (st->bucket >= INET_LHTABLE_SIZE)
2303 break;
2304 st->state = TCP_SEQ_STATE_LISTENING;
2305 rc = listening_get_next(seq, NULL);
2306 while (offset-- && rc)
2307 rc = listening_get_next(seq, rc);
2308 if (rc)
2309 break;
2310 st->bucket = 0;
2311 st->state = TCP_SEQ_STATE_ESTABLISHED;
2312 /* Fallthrough */
2313 case TCP_SEQ_STATE_ESTABLISHED:
2314 if (st->bucket > tcp_hashinfo.ehash_mask)
2315 break;
2316 rc = established_get_first(seq);
2317 while (offset-- && rc)
2318 rc = established_get_next(seq, rc);
2319 }
2320
2321 st->num = orig_num;
2322
2323 return rc;
2324}
2325
2326void *tcp_seq_start(struct seq_file *seq, loff_t *pos)
2327{
2328 struct tcp_iter_state *st = seq->private;
2329 void *rc;
2330
2331 if (*pos && *pos == st->last_pos) {
2332 rc = tcp_seek_last_pos(seq);
2333 if (rc)
2334 goto out;
2335 }
2336
2337 st->state = TCP_SEQ_STATE_LISTENING;
2338 st->num = 0;
2339 st->bucket = 0;
2340 st->offset = 0;
2341 rc = *pos ? tcp_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
2342
2343out:
2344 st->last_pos = *pos;
2345 return rc;
2346}
2347EXPORT_SYMBOL(tcp_seq_start);
2348
2349void *tcp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2350{
2351 struct tcp_iter_state *st = seq->private;
2352 void *rc = NULL;
2353
2354 if (v == SEQ_START_TOKEN) {
2355 rc = tcp_get_idx(seq, 0);
2356 goto out;
2357 }
2358
2359 switch (st->state) {
2360 case TCP_SEQ_STATE_LISTENING:
2361 rc = listening_get_next(seq, v);
2362 if (!rc) {
2363 st->state = TCP_SEQ_STATE_ESTABLISHED;
2364 st->bucket = 0;
2365 st->offset = 0;
2366 rc = established_get_first(seq);
2367 }
2368 break;
2369 case TCP_SEQ_STATE_ESTABLISHED:
2370 rc = established_get_next(seq, v);
2371 break;
2372 }
2373out:
2374 ++*pos;
2375 st->last_pos = *pos;
2376 return rc;
2377}
2378EXPORT_SYMBOL(tcp_seq_next);
2379
2380void tcp_seq_stop(struct seq_file *seq, void *v)
2381{
2382 struct tcp_iter_state *st = seq->private;
2383
2384 switch (st->state) {
2385 case TCP_SEQ_STATE_LISTENING:
2386 if (v != SEQ_START_TOKEN)
2387 spin_unlock(&tcp_hashinfo.listening_hash[st->bucket].lock);
2388 break;
2389 case TCP_SEQ_STATE_ESTABLISHED:
2390 if (v)
2391 spin_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
2392 break;
2393 }
2394}
2395EXPORT_SYMBOL(tcp_seq_stop);
2396
2397static void get_openreq4(const struct request_sock *req,
2398 struct seq_file *f, int i)
2399{
2400 const struct inet_request_sock *ireq = inet_rsk(req);
2401 long delta = req->rsk_timer.expires - jiffies;
2402
2403 seq_printf(f, "%4d: %08X:%04X %08X:%04X"
2404 " %02X %08X:%08X %02X:%08lX %08X %5u %8d %u %d %pK",
2405 i,
2406 ireq->ir_loc_addr,
2407 ireq->ir_num,
2408 ireq->ir_rmt_addr,
2409 ntohs(ireq->ir_rmt_port),
2410 TCP_SYN_RECV,
2411 0, 0, /* could print option size, but that is af dependent. */
2412 1, /* timers active (only the expire timer) */
2413 jiffies_delta_to_clock_t(delta),
2414 req->num_timeout,
2415 from_kuid_munged(seq_user_ns(f),
2416 sock_i_uid(req->rsk_listener)),
2417 0, /* non standard timer */
2418 0, /* open_requests have no inode */
2419 0,
2420 req);
2421}
2422
2423static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i)
2424{
2425 int timer_active;
2426 unsigned long timer_expires;
2427 const struct tcp_sock *tp = tcp_sk(sk);
2428 const struct inet_connection_sock *icsk = inet_csk(sk);
2429 const struct inet_sock *inet = inet_sk(sk);
2430 const struct fastopen_queue *fastopenq = &icsk->icsk_accept_queue.fastopenq;
2431 __be32 dest = inet->inet_daddr;
2432 __be32 src = inet->inet_rcv_saddr;
2433 __u16 destp = ntohs(inet->inet_dport);
2434 __u16 srcp = ntohs(inet->inet_sport);
2435 int rx_queue;
2436 int state;
2437
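	/* Encode the pending timer for /proc/net/tcp: 1 = retransmit/loss
	 * probe, 2 = keepalive, 4 = zero-window probe, 0 = none.
	 */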
2438 if (icsk->icsk_pending == ICSK_TIME_RETRANS ||
2439 icsk->icsk_pending == ICSK_TIME_REO_TIMEOUT ||
2440 icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) {
2441 timer_active = 1;
2442 timer_expires = icsk->icsk_timeout;
2443 } else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
2444 timer_active = 4;
2445 timer_expires = icsk->icsk_timeout;
2446 } else if (timer_pending(&sk->sk_timer)) {
2447 timer_active = 2;
2448 timer_expires = sk->sk_timer.expires;
2449 } else {
2450 timer_active = 0;
2451 timer_expires = jiffies;
2452 }
2453
2454 state = inet_sk_state_load(sk);
2455 if (state == TCP_LISTEN)
2456 rx_queue = sk->sk_ack_backlog;
2457 else
2458 /* Because we don't lock the socket,
2459 * we might find a transient negative value.
2460 */
2461 rx_queue = max_t(int, READ_ONCE(tp->rcv_nxt) -
2462 READ_ONCE(tp->copied_seq), 0);
2463
2464 seq_printf(f, "%4d: %08X:%04X %08X:%04X %02X %08X:%08X %02X:%08lX "
2465 "%08X %5u %8d %lu %d %pK %lu %lu %u %u %d",
2466 i, src, srcp, dest, destp, state,
2467 READ_ONCE(tp->write_seq) - tp->snd_una,
2468 rx_queue,
2469 timer_active,
2470 jiffies_delta_to_clock_t(timer_expires - jiffies),
2471 icsk->icsk_retransmits,
2472 from_kuid_munged(seq_user_ns(f), sock_i_uid(sk)),
2473 icsk->icsk_probes_out,
2474 sock_i_ino(sk),
2475 refcount_read(&sk->sk_refcnt), sk,
2476 jiffies_to_clock_t(icsk->icsk_rto),
2477 jiffies_to_clock_t(icsk->icsk_ack.ato),
2478 (icsk->icsk_ack.quick << 1) | inet_csk_in_pingpong_mode(sk),
2479 tp->snd_cwnd,
2480 state == TCP_LISTEN ?
2481 fastopenq->max_qlen :
2482 (tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh));
2483}
2484
2485static void get_timewait4_sock(const struct inet_timewait_sock *tw,
2486 struct seq_file *f, int i)
2487{
2488 long delta = tw->tw_timer.expires - jiffies;
2489 __be32 dest, src;
2490 __u16 destp, srcp;
2491
2492 dest = tw->tw_daddr;
2493 src = tw->tw_rcv_saddr;
2494 destp = ntohs(tw->tw_dport);
2495 srcp = ntohs(tw->tw_sport);
2496
2497 seq_printf(f, "%4d: %08X:%04X %08X:%04X"
2498 " %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK",
2499 i, src, srcp, dest, destp, tw->tw_substate, 0, 0,
2500 3, jiffies_delta_to_clock_t(delta), 0, 0, 0, 0,
2501 refcount_read(&tw->tw_refcnt), tw);
2502}
2503
2504#define TMPSZ 150
2505
2506static int tcp4_seq_show(struct seq_file *seq, void *v)
2507{
2508 struct tcp_iter_state *st;
2509 struct sock *sk = v;
2510
2511 seq_setwidth(seq, TMPSZ - 1);
2512 if (v == SEQ_START_TOKEN) {
2513 seq_puts(seq, " sl local_address rem_address st tx_queue "
2514 "rx_queue tr tm->when retrnsmt uid timeout "
2515 "inode");
2516 goto out;
2517 }
2518 st = seq->private;
2519
2520 if (sk->sk_state == TCP_TIME_WAIT)
2521 get_timewait4_sock(v, seq, st->num);
2522 else if (sk->sk_state == TCP_NEW_SYN_RECV)
2523 get_openreq4(v, seq, st->num);
2524 else
2525 get_tcp4_sock(v, seq, st->num);
2526out:
2527 seq_pad(seq, '\n');
2528 return 0;
2529}
2530
2531static const struct seq_operations tcp4_seq_ops = {
2532 .show = tcp4_seq_show,
2533 .start = tcp_seq_start,
2534 .next = tcp_seq_next,
2535 .stop = tcp_seq_stop,
2536};
2537
2538static struct tcp_seq_afinfo tcp4_seq_afinfo = {
2539 .family = AF_INET,
2540};
2541
2542static int __net_init tcp4_proc_init_net(struct net *net)
2543{
2544 if (!proc_create_net_data("tcp", 0444, net->proc_net, &tcp4_seq_ops,
2545 sizeof(struct tcp_iter_state), &tcp4_seq_afinfo))
2546 return -ENOMEM;
2547 return 0;
2548}
2549
2550static void __net_exit tcp4_proc_exit_net(struct net *net)
2551{
2552 remove_proc_entry("tcp", net->proc_net);
2553}
2554
2555static struct pernet_operations tcp4_net_ops = {
2556 .init = tcp4_proc_init_net,
2557 .exit = tcp4_proc_exit_net,
2558};
2559
2560int __init tcp4_proc_init(void)
2561{
2562 return register_pernet_subsys(&tcp4_net_ops);
2563}
2564
2565void tcp4_proc_exit(void)
2566{
2567 unregister_pernet_subsys(&tcp4_net_ops);
2568}
2569#endif /* CONFIG_PROC_FS */
2570
2571struct proto tcp_prot = {
2572 .name = "TCP",
2573 .owner = THIS_MODULE,
2574 .close = tcp_close,
2575 .pre_connect = tcp_v4_pre_connect,
2576 .connect = tcp_v4_connect,
2577 .disconnect = tcp_disconnect,
2578 .accept = inet_csk_accept,
2579 .ioctl = tcp_ioctl,
2580 .init = tcp_v4_init_sock,
2581 .destroy = tcp_v4_destroy_sock,
2582 .shutdown = tcp_shutdown,
2583 .setsockopt = tcp_setsockopt,
2584 .getsockopt = tcp_getsockopt,
2585 .keepalive = tcp_set_keepalive,
2586 .recvmsg = tcp_recvmsg,
2587 .sendmsg = tcp_sendmsg,
2588 .sendpage = tcp_sendpage,
2589 .backlog_rcv = tcp_v4_do_rcv,
2590 .release_cb = tcp_release_cb,
2591 .hash = inet_hash,
2592 .unhash = inet_unhash,
2593 .get_port = inet_csk_get_port,
2594 .enter_memory_pressure = tcp_enter_memory_pressure,
2595 .leave_memory_pressure = tcp_leave_memory_pressure,
2596 .stream_memory_free = tcp_stream_memory_free,
2597 .sockets_allocated = &tcp_sockets_allocated,
2598 .orphan_count = &tcp_orphan_count,
2599 .memory_allocated = &tcp_memory_allocated,
2600 .memory_pressure = &tcp_memory_pressure,
2601 .sysctl_mem = sysctl_tcp_mem,
2602 .sysctl_wmem_offset = offsetof(struct net, ipv4.sysctl_tcp_wmem),
2603 .sysctl_rmem_offset = offsetof(struct net, ipv4.sysctl_tcp_rmem),
2604 .max_header = MAX_TCP_HEADER,
2605 .obj_size = sizeof(struct tcp_sock),
2606 .slab_flags = SLAB_TYPESAFE_BY_RCU,
2607 .twsk_prot = &tcp_timewait_sock_ops,
2608 .rsk_prot = &tcp_request_sock_ops,
2609 .h.hashinfo = &tcp_hashinfo,
2610 .no_autobind = true,
2611#ifdef CONFIG_COMPAT
2612 .compat_setsockopt = compat_tcp_setsockopt,
2613 .compat_getsockopt = compat_tcp_getsockopt,
2614#endif
2615 .diag_destroy = tcp_abort,
2616};
2617EXPORT_SYMBOL(tcp_prot);
2618
2619static void __net_exit tcp_sk_exit(struct net *net)
2620{
2621 int cpu;
2622
2623 if (net->ipv4.tcp_congestion_control)
2624 module_put(net->ipv4.tcp_congestion_control->owner);
2625
2626 for_each_possible_cpu(cpu)
2627 inet_ctl_sock_destroy(*per_cpu_ptr(net->ipv4.tcp_sk, cpu));
2628 free_percpu(net->ipv4.tcp_sk);
2629}
2630
2631static int __net_init tcp_sk_init(struct net *net)
2632{
2633 int res, cpu, cnt;
2634
2635 net->ipv4.tcp_sk = alloc_percpu(struct sock *);
2636 if (!net->ipv4.tcp_sk)
2637 return -ENOMEM;
2638
2639 for_each_possible_cpu(cpu) {
2640 struct sock *sk;
2641
2642 res = inet_ctl_sock_create(&sk, PF_INET, SOCK_RAW,
2643 IPPROTO_TCP, net);
2644 if (res)
2645 goto fail;
2646 sock_set_flag(sk, SOCK_USE_WRITE_QUEUE);
2647
2648 /* Please enforce IP_DF and IPID==0 for RST and
2649 * ACK sent in SYN-RECV and TIME-WAIT state.
2650 */
2651 inet_sk(sk)->pmtudisc = IP_PMTUDISC_DO;
2652
2653 *per_cpu_ptr(net->ipv4.tcp_sk, cpu) = sk;
2654 }
2655
2656 net->ipv4.sysctl_tcp_ecn = 2;
2657 net->ipv4.sysctl_tcp_ecn_fallback = 1;
2658
2659 net->ipv4.sysctl_tcp_base_mss = TCP_BASE_MSS;
2660 net->ipv4.sysctl_tcp_min_snd_mss = TCP_MIN_SND_MSS;
2661 net->ipv4.sysctl_tcp_probe_threshold = TCP_PROBE_THRESHOLD;
2662 net->ipv4.sysctl_tcp_probe_interval = TCP_PROBE_INTERVAL;
2663 net->ipv4.sysctl_tcp_mtu_probe_floor = TCP_MIN_SND_MSS;
2664
2665 net->ipv4.sysctl_tcp_keepalive_time = TCP_KEEPALIVE_TIME;
2666 net->ipv4.sysctl_tcp_keepalive_probes = TCP_KEEPALIVE_PROBES;
2667 net->ipv4.sysctl_tcp_keepalive_intvl = TCP_KEEPALIVE_INTVL;
2668
2669 net->ipv4.sysctl_tcp_syn_retries = TCP_SYN_RETRIES;
2670 net->ipv4.sysctl_tcp_synack_retries = TCP_SYNACK_RETRIES;
2671 net->ipv4.sysctl_tcp_syncookies = 1;
2672 net->ipv4.sysctl_tcp_reordering = TCP_FASTRETRANS_THRESH;
2673 net->ipv4.sysctl_tcp_retries1 = TCP_RETR1;
2674 net->ipv4.sysctl_tcp_retries2 = TCP_RETR2;
2675 net->ipv4.sysctl_tcp_orphan_retries = 0;
2676 net->ipv4.sysctl_tcp_fin_timeout = TCP_FIN_TIMEOUT;
2677 net->ipv4.sysctl_tcp_notsent_lowat = UINT_MAX;
2678 net->ipv4.sysctl_tcp_tw_reuse = 2;
2679
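	/* Scale the TIME-WAIT bucket and SYN backlog limits with the size
	 * of the established hash table.
	 */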
2680 cnt = tcp_hashinfo.ehash_mask + 1;
2681 net->ipv4.tcp_death_row.sysctl_max_tw_buckets = cnt / 2;
2682 net->ipv4.tcp_death_row.hashinfo = &tcp_hashinfo;
2683
2684 net->ipv4.sysctl_max_syn_backlog = max(128, cnt / 128);
2685 net->ipv4.sysctl_tcp_sack = 1;
2686 net->ipv4.sysctl_tcp_window_scaling = 1;
2687 net->ipv4.sysctl_tcp_timestamps = 1;
2688 net->ipv4.sysctl_tcp_early_retrans = 3;
2689 net->ipv4.sysctl_tcp_recovery = TCP_RACK_LOSS_DETECTION;
2690 net->ipv4.sysctl_tcp_slow_start_after_idle = 1; /* By default, RFC2861 behavior. */
2691 net->ipv4.sysctl_tcp_retrans_collapse = 1;
2692 net->ipv4.sysctl_tcp_max_reordering = 300;
2693 net->ipv4.sysctl_tcp_dsack = 1;
2694 net->ipv4.sysctl_tcp_app_win = 31;
2695 net->ipv4.sysctl_tcp_adv_win_scale = 1;
2696 net->ipv4.sysctl_tcp_frto = 2;
2697 net->ipv4.sysctl_tcp_moderate_rcvbuf = 1;
2698 /* This limits the percentage of the congestion window which we
2699 * will allow a single TSO frame to consume. Building TSO frames
2700 * which are too large can cause TCP streams to be bursty.
2701 */
2702 net->ipv4.sysctl_tcp_tso_win_divisor = 3;
2703 /* Default TSQ limit of 16 TSO segments */
2704 net->ipv4.sysctl_tcp_limit_output_bytes = 16 * 65536;
2705 /* rfc5961 challenge ack rate limiting */
2706 net->ipv4.sysctl_tcp_challenge_ack_limit = 1000;
2707 net->ipv4.sysctl_tcp_min_tso_segs = 2;
2708 net->ipv4.sysctl_tcp_min_rtt_wlen = 300;
2709 net->ipv4.sysctl_tcp_autocorking = 1;
2710 net->ipv4.sysctl_tcp_invalid_ratelimit = HZ/2;
2711 net->ipv4.sysctl_tcp_pacing_ss_ratio = 200;
2712 net->ipv4.sysctl_tcp_pacing_ca_ratio = 120;
2713 if (net != &init_net) {
2714 memcpy(net->ipv4.sysctl_tcp_rmem,
2715 init_net.ipv4.sysctl_tcp_rmem,
2716 sizeof(init_net.ipv4.sysctl_tcp_rmem));
2717 memcpy(net->ipv4.sysctl_tcp_wmem,
2718 init_net.ipv4.sysctl_tcp_wmem,
2719 sizeof(init_net.ipv4.sysctl_tcp_wmem));
2720 }
2721 net->ipv4.sysctl_tcp_comp_sack_delay_ns = NSEC_PER_MSEC;
2722 net->ipv4.sysctl_tcp_comp_sack_nr = 44;
2723 net->ipv4.sysctl_tcp_fastopen = TFO_CLIENT_ENABLE;
2724 spin_lock_init(&net->ipv4.tcp_fastopen_ctx_lock);
2725 net->ipv4.sysctl_tcp_fastopen_blackhole_timeout = 60 * 60;
2726 atomic_set(&net->ipv4.tfo_active_disable_times, 0);
2727
2728 /* Reno is always built in */
2729 if (!net_eq(net, &init_net) &&
2730 try_module_get(init_net.ipv4.tcp_congestion_control->owner))
2731 net->ipv4.tcp_congestion_control = init_net.ipv4.tcp_congestion_control;
2732 else
2733 net->ipv4.tcp_congestion_control = &tcp_reno;
2734
2735 return 0;
2736fail:
2737 tcp_sk_exit(net);
2738
2739 return res;
2740}
2741
2742static void __net_exit tcp_sk_exit_batch(struct list_head *net_exit_list)
2743{
2744 struct net *net;
2745
2746 inet_twsk_purge(&tcp_hashinfo, AF_INET);
2747
2748 list_for_each_entry(net, net_exit_list, exit_list)
2749 tcp_fastopen_ctx_destroy(net);
2750}
2751
2752static struct pernet_operations __net_initdata tcp_sk_ops = {
2753 .init = tcp_sk_init,
2754 .exit = tcp_sk_exit,
2755 .exit_batch = tcp_sk_exit_batch,
2756};
2757
2758void __init tcp_v4_init(void)
2759{
2760 if (register_pernet_subsys(&tcp_sk_ops))
2761 panic("Failed to create the TCP control socket.\n");
2762}
1/*
2 * INET An implementation of the TCP/IP protocol suite for the LINUX
3 * operating system. INET is implemented using the BSD Socket
4 * interface as the means of communication with the user level.
5 *
6 * Implementation of the Transmission Control Protocol(TCP).
7 *
8 * IPv4 specific functions
9 *
10 *
11 * code split from:
12 * linux/ipv4/tcp.c
13 * linux/ipv4/tcp_input.c
14 * linux/ipv4/tcp_output.c
15 *
16 * See tcp.c for author information
17 *
18 * This program is free software; you can redistribute it and/or
19 * modify it under the terms of the GNU General Public License
20 * as published by the Free Software Foundation; either version
21 * 2 of the License, or (at your option) any later version.
22 */
23
24/*
25 * Changes:
26 * David S. Miller : New socket lookup architecture.
27 * This code is dedicated to John Dyson.
28 * David S. Miller : Change semantics of established hash,
29 * half is devoted to TIME_WAIT sockets
30 * and the rest go in the other half.
31 * Andi Kleen : Add support for syncookies and fixed
32 * some bugs: ip options weren't passed to
33 * the TCP layer, missed a check for an
34 * ACK bit.
35 * Andi Kleen : Implemented fast path mtu discovery.
36 * Fixed many serious bugs in the
37 * request_sock handling and moved
38 * most of it into the af independent code.
39 * Added tail drop and some other bugfixes.
40 * Added new listen semantics.
41 * Mike McLagan : Routing by source
42 * Juan Jose Ciarlante: ip_dynaddr bits
43 * Andi Kleen: various fixes.
44 * Vitaly E. Lavrov : Transparent proxy revived after year
45 * coma.
46 * Andi Kleen : Fix new listen.
47 * Andi Kleen : Fix accept error reporting.
48 * YOSHIFUJI Hideaki @USAGI and: Support IPV6_V6ONLY socket option, which
49 * Alexey Kuznetsov allow both IPv4 and IPv6 sockets to bind
50 * a single port at the same time.
51 */
52
53#define pr_fmt(fmt) "TCP: " fmt
54
55#include <linux/bottom_half.h>
56#include <linux/types.h>
57#include <linux/fcntl.h>
58#include <linux/module.h>
59#include <linux/random.h>
60#include <linux/cache.h>
61#include <linux/jhash.h>
62#include <linux/init.h>
63#include <linux/times.h>
64#include <linux/slab.h>
65
66#include <net/net_namespace.h>
67#include <net/icmp.h>
68#include <net/inet_hashtables.h>
69#include <net/tcp.h>
70#include <net/transp_v6.h>
71#include <net/ipv6.h>
72#include <net/inet_common.h>
73#include <net/timewait_sock.h>
74#include <net/xfrm.h>
75#include <net/netdma.h>
76#include <net/secure_seq.h>
77#include <net/tcp_memcontrol.h>
78
79#include <linux/inet.h>
80#include <linux/ipv6.h>
81#include <linux/stddef.h>
82#include <linux/proc_fs.h>
83#include <linux/seq_file.h>
84
85#include <linux/crypto.h>
86#include <linux/scatterlist.h>
87
88int sysctl_tcp_tw_reuse __read_mostly;
89int sysctl_tcp_low_latency __read_mostly;
90EXPORT_SYMBOL(sysctl_tcp_low_latency);
91
92
93#ifdef CONFIG_TCP_MD5SIG
94static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
95 __be32 daddr, __be32 saddr, const struct tcphdr *th);
96#endif
97
98struct inet_hashinfo tcp_hashinfo;
99EXPORT_SYMBOL(tcp_hashinfo);
100
101static inline __u32 tcp_v4_init_sequence(const struct sk_buff *skb)
102{
103 return secure_tcp_sequence_number(ip_hdr(skb)->daddr,
104 ip_hdr(skb)->saddr,
105 tcp_hdr(skb)->dest,
106 tcp_hdr(skb)->source);
107}
108
109int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp)
110{
111 const struct tcp_timewait_sock *tcptw = tcp_twsk(sktw);
112 struct tcp_sock *tp = tcp_sk(sk);
113
114 /* With PAWS, it is safe from the viewpoint
115 of data integrity. Even without PAWS it is safe provided sequence
116 spaces do not overlap i.e. at data rates <= 80Mbit/sec.
117
118 Actually, the idea is close to VJ's one, only timestamp cache is
119 held not per host, but per port pair and TW bucket is used as state
120 holder.
121
122 If TW bucket has been already destroyed we fall back to VJ's scheme
123 and use initial timestamp retrieved from peer table.
124 */
125 if (tcptw->tw_ts_recent_stamp &&
126 (twp == NULL || (sysctl_tcp_tw_reuse &&
127 get_seconds() - tcptw->tw_ts_recent_stamp > 1))) {
128 tp->write_seq = tcptw->tw_snd_nxt + 65535 + 2;
129 if (tp->write_seq == 0)
130 tp->write_seq = 1;
131 tp->rx_opt.ts_recent = tcptw->tw_ts_recent;
132 tp->rx_opt.ts_recent_stamp = tcptw->tw_ts_recent_stamp;
133 sock_hold(sktw);
134 return 1;
135 }
136
137 return 0;
138}
139EXPORT_SYMBOL_GPL(tcp_twsk_unique);
140
141static int tcp_repair_connect(struct sock *sk)
142{
143 tcp_connect_init(sk);
144 tcp_finish_connect(sk, NULL);
145
146 return 0;
147}
148
149/* This will initiate an outgoing connection. */
150int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
151{
152 struct sockaddr_in *usin = (struct sockaddr_in *)uaddr;
153 struct inet_sock *inet = inet_sk(sk);
154 struct tcp_sock *tp = tcp_sk(sk);
155 __be16 orig_sport, orig_dport;
156 __be32 daddr, nexthop;
157 struct flowi4 *fl4;
158 struct rtable *rt;
159 int err;
160 struct ip_options_rcu *inet_opt;
161
162 if (addr_len < sizeof(struct sockaddr_in))
163 return -EINVAL;
164
165 if (usin->sin_family != AF_INET)
166 return -EAFNOSUPPORT;
167
168 nexthop = daddr = usin->sin_addr.s_addr;
169 inet_opt = rcu_dereference_protected(inet->inet_opt,
170 sock_owned_by_user(sk));
171 if (inet_opt && inet_opt->opt.srr) {
172 if (!daddr)
173 return -EINVAL;
174 nexthop = inet_opt->opt.faddr;
175 }
176
177 orig_sport = inet->inet_sport;
178 orig_dport = usin->sin_port;
179 fl4 = &inet->cork.fl.u.ip4;
180 rt = ip_route_connect(fl4, nexthop, inet->inet_saddr,
181 RT_CONN_FLAGS(sk), sk->sk_bound_dev_if,
182 IPPROTO_TCP,
183 orig_sport, orig_dport, sk, true);
184 if (IS_ERR(rt)) {
185 err = PTR_ERR(rt);
186 if (err == -ENETUNREACH)
187 IP_INC_STATS_BH(sock_net(sk), IPSTATS_MIB_OUTNOROUTES);
188 return err;
189 }
190
191 if (rt->rt_flags & (RTCF_MULTICAST | RTCF_BROADCAST)) {
192 ip_rt_put(rt);
193 return -ENETUNREACH;
194 }
195
196 if (!inet_opt || !inet_opt->opt.srr)
197 daddr = fl4->daddr;
198
199 if (!inet->inet_saddr)
200 inet->inet_saddr = fl4->saddr;
201 inet->inet_rcv_saddr = inet->inet_saddr;
202
203 if (tp->rx_opt.ts_recent_stamp && inet->inet_daddr != daddr) {
204 /* Reset inherited state */
205 tp->rx_opt.ts_recent = 0;
206 tp->rx_opt.ts_recent_stamp = 0;
207 if (likely(!tp->repair))
208 tp->write_seq = 0;
209 }
210
211 if (tcp_death_row.sysctl_tw_recycle &&
212 !tp->rx_opt.ts_recent_stamp && fl4->daddr == daddr) {
213 struct inet_peer *peer = rt_get_peer(rt, fl4->daddr);
214 /*
215 * VJ's idea. We save last timestamp seen from
216 * the destination in peer table, when entering state
217 * TIME-WAIT * and initialize rx_opt.ts_recent from it,
218 * when trying new connection.
219 */
220 if (peer) {
221 inet_peer_refcheck(peer);
222 if ((u32)get_seconds() - peer->tcp_ts_stamp <= TCP_PAWS_MSL) {
223 tp->rx_opt.ts_recent_stamp = peer->tcp_ts_stamp;
224 tp->rx_opt.ts_recent = peer->tcp_ts;
225 }
226 }
227 }
228
229 inet->inet_dport = usin->sin_port;
230 inet->inet_daddr = daddr;
231
232 inet_csk(sk)->icsk_ext_hdr_len = 0;
233 if (inet_opt)
234 inet_csk(sk)->icsk_ext_hdr_len = inet_opt->opt.optlen;
235
236 tp->rx_opt.mss_clamp = TCP_MSS_DEFAULT;
237
238 /* Socket identity is still unknown (sport may be zero).
239 * However we set state to SYN-SENT and not releasing socket
240 * lock select source port, enter ourselves into the hash tables and
241 * complete initialization after this.
242 */
243 tcp_set_state(sk, TCP_SYN_SENT);
244 err = inet_hash_connect(&tcp_death_row, sk);
245 if (err)
246 goto failure;
247
248 rt = ip_route_newports(fl4, rt, orig_sport, orig_dport,
249 inet->inet_sport, inet->inet_dport, sk);
250 if (IS_ERR(rt)) {
251 err = PTR_ERR(rt);
252 rt = NULL;
253 goto failure;
254 }
255 /* OK, now commit destination to socket. */
256 sk->sk_gso_type = SKB_GSO_TCPV4;
257 sk_setup_caps(sk, &rt->dst);
258
259 if (!tp->write_seq && likely(!tp->repair))
260 tp->write_seq = secure_tcp_sequence_number(inet->inet_saddr,
261 inet->inet_daddr,
262 inet->inet_sport,
263 usin->sin_port);
264
265 inet->inet_id = tp->write_seq ^ jiffies;
266
267 if (likely(!tp->repair))
268 err = tcp_connect(sk);
269 else
270 err = tcp_repair_connect(sk);
271
272 rt = NULL;
273 if (err)
274 goto failure;
275
276 return 0;
277
278failure:
279 /*
280 * This unhashes the socket and releases the local port,
281 * if necessary.
282 */
283 tcp_set_state(sk, TCP_CLOSE);
284 ip_rt_put(rt);
285 sk->sk_route_caps = 0;
286 inet->inet_dport = 0;
287 return err;
288}
289EXPORT_SYMBOL(tcp_v4_connect);
290
291/*
292 * This routine does path mtu discovery as defined in RFC1191.
293 */
294static void do_pmtu_discovery(struct sock *sk, const struct iphdr *iph, u32 mtu)
295{
296 struct dst_entry *dst;
297 struct inet_sock *inet = inet_sk(sk);
298
299 /* We are not interested in TCP_LISTEN and open_requests (SYN-ACKs
300 * send out by Linux are always <576bytes so they should go through
301 * unfragmented).
302 */
303 if (sk->sk_state == TCP_LISTEN)
304 return;
305
306 /* We don't check in the destentry if pmtu discovery is forbidden
307 * on this route. We just assume that no packet_to_big packets
308 * are send back when pmtu discovery is not active.
309 * There is a small race when the user changes this flag in the
310 * route, but I think that's acceptable.
311 */
312 if ((dst = __sk_dst_check(sk, 0)) == NULL)
313 return;
314
315 dst->ops->update_pmtu(dst, mtu);
316
317 /* Something is about to be wrong... Remember soft error
318 * for the case, if this connection will not able to recover.
319 */
320 if (mtu < dst_mtu(dst) && ip_dont_fragment(sk, dst))
321 sk->sk_err_soft = EMSGSIZE;
322
323 mtu = dst_mtu(dst);
324
325 if (inet->pmtudisc != IP_PMTUDISC_DONT &&
326 inet_csk(sk)->icsk_pmtu_cookie > mtu) {
327 tcp_sync_mss(sk, mtu);
328
329 /* Resend the TCP packet because it's
330 * clear that the old packet has been
331 * dropped. This is the new "fast" path mtu
332 * discovery.
333 */
334 tcp_simple_retransmit(sk);
335 } /* else let the usual retransmit timer handle it */
336}
337
338/*
339 * This routine is called by the ICMP module when it gets some
340 * sort of error condition. If err < 0 then the socket should
341 * be closed and the error returned to the user. If err > 0
342 * it's just the icmp type << 8 | icmp code. After adjustment
343 * header points to the first 8 bytes of the tcp header. We need
344 * to find the appropriate port.
345 *
346 * The locking strategy used here is very "optimistic". When
347 * someone else accesses the socket the ICMP is just dropped
348 * and for some paths there is no check at all.
349 * A more general error queue to queue errors for later handling
350 * is probably better.
351 *
352 */
353
354void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
355{
356 const struct iphdr *iph = (const struct iphdr *)icmp_skb->data;
357 struct tcphdr *th = (struct tcphdr *)(icmp_skb->data + (iph->ihl << 2));
358 struct inet_connection_sock *icsk;
359 struct tcp_sock *tp;
360 struct inet_sock *inet;
361 const int type = icmp_hdr(icmp_skb)->type;
362 const int code = icmp_hdr(icmp_skb)->code;
363 struct sock *sk;
364 struct sk_buff *skb;
365 __u32 seq;
366 __u32 remaining;
367 int err;
368 struct net *net = dev_net(icmp_skb->dev);
369
370 if (icmp_skb->len < (iph->ihl << 2) + 8) {
371 ICMP_INC_STATS_BH(net, ICMP_MIB_INERRORS);
372 return;
373 }
374
375 sk = inet_lookup(net, &tcp_hashinfo, iph->daddr, th->dest,
376 iph->saddr, th->source, inet_iif(icmp_skb));
377 if (!sk) {
378 ICMP_INC_STATS_BH(net, ICMP_MIB_INERRORS);
379 return;
380 }
381 if (sk->sk_state == TCP_TIME_WAIT) {
382 inet_twsk_put(inet_twsk(sk));
383 return;
384 }
385
386 bh_lock_sock(sk);
387 /* If too many ICMPs get dropped on busy
388 * servers this needs to be solved differently.
389 */
390 if (sock_owned_by_user(sk))
391 NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS);
392
393 if (sk->sk_state == TCP_CLOSE)
394 goto out;
395
396 if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
397 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
398 goto out;
399 }
400
401 icsk = inet_csk(sk);
402 tp = tcp_sk(sk);
403 seq = ntohl(th->seq);
404 if (sk->sk_state != TCP_LISTEN &&
405 !between(seq, tp->snd_una, tp->snd_nxt)) {
406 NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
407 goto out;
408 }
409
410 switch (type) {
411 case ICMP_SOURCE_QUENCH:
412 /* Just silently ignore these. */
413 goto out;
414 case ICMP_PARAMETERPROB:
415 err = EPROTO;
416 break;
417 case ICMP_DEST_UNREACH:
418 if (code > NR_ICMP_UNREACH)
419 goto out;
420
421 if (code == ICMP_FRAG_NEEDED) { /* PMTU discovery (RFC1191) */
422 if (!sock_owned_by_user(sk))
423 do_pmtu_discovery(sk, iph, info);
424 goto out;
425 }
426
427 err = icmp_err_convert[code].errno;
428 /* check if icmp_skb allows revert of backoff
429 * (see draft-zimmermann-tcp-lcd) */
430 if (code != ICMP_NET_UNREACH && code != ICMP_HOST_UNREACH)
431 break;
432 if (seq != tp->snd_una || !icsk->icsk_retransmits ||
433 !icsk->icsk_backoff)
434 break;
435
436 if (sock_owned_by_user(sk))
437 break;
438
439 icsk->icsk_backoff--;
440 inet_csk(sk)->icsk_rto = (tp->srtt ? __tcp_set_rto(tp) :
441 TCP_TIMEOUT_INIT) << icsk->icsk_backoff;
442 tcp_bound_rto(sk);
443
444 skb = tcp_write_queue_head(sk);
445 BUG_ON(!skb);
446
447 remaining = icsk->icsk_rto - min(icsk->icsk_rto,
448 tcp_time_stamp - TCP_SKB_CB(skb)->when);
449
450 if (remaining) {
451 inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
452 remaining, TCP_RTO_MAX);
453 } else {
454 /* RTO revert clocked out retransmission.
455 * Will retransmit now */
456 tcp_retransmit_timer(sk);
457 }
458
459 break;
460 case ICMP_TIME_EXCEEDED:
461 err = EHOSTUNREACH;
462 break;
463 default:
464 goto out;
465 }
466
467 switch (sk->sk_state) {
468 struct request_sock *req, **prev;
469 case TCP_LISTEN:
470 if (sock_owned_by_user(sk))
471 goto out;
472
473 req = inet_csk_search_req(sk, &prev, th->dest,
474 iph->daddr, iph->saddr);
475 if (!req)
476 goto out;
477
478 /* ICMPs are not backlogged, hence we cannot get
479 an established socket here.
480 */
481 WARN_ON(req->sk);
482
483 if (seq != tcp_rsk(req)->snt_isn) {
484 NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
485 goto out;
486 }
487
488 /*
489 * Still in SYN_RECV, just remove it silently.
490 * There is no good way to pass the error to the newly
491 * created socket, and POSIX does not want network
492 * errors returned from accept().
493 */
494 inet_csk_reqsk_queue_drop(sk, req, prev);
495 goto out;
496
497 case TCP_SYN_SENT:
498 case TCP_SYN_RECV: /* Cannot happen.
499 It can f.e. if SYNs crossed.
500 */
501 if (!sock_owned_by_user(sk)) {
502 sk->sk_err = err;
503
504 sk->sk_error_report(sk);
505
506 tcp_done(sk);
507 } else {
508 sk->sk_err_soft = err;
509 }
510 goto out;
511 }
512
513 /* If we've already connected we will keep trying
514 * until we time out, or the user gives up.
515 *
516 * rfc1122 4.2.3.9 allows to consider as hard errors
517 * only PROTO_UNREACH and PORT_UNREACH (well, FRAG_FAILED too,
518 * but it is obsoleted by pmtu discovery).
519 *
520 * Note, that in modern internet, where routing is unreliable
521 * and in each dark corner broken firewalls sit, sending random
522 * errors ordered by their masters even this two messages finally lose
523 * their original sense (even Linux sends invalid PORT_UNREACHs)
524 *
525 * Now we are in compliance with RFCs.
526 * --ANK (980905)
527 */
528
529 inet = inet_sk(sk);
530 if (!sock_owned_by_user(sk) && inet->recverr) {
531 sk->sk_err = err;
532 sk->sk_error_report(sk);
533 } else { /* Only an error on timeout */
534 sk->sk_err_soft = err;
535 }
536
537out:
538 bh_unlock_sock(sk);
539 sock_put(sk);
540}
541
542static void __tcp_v4_send_check(struct sk_buff *skb,
543 __be32 saddr, __be32 daddr)
544{
545 struct tcphdr *th = tcp_hdr(skb);
546
547 if (skb->ip_summed == CHECKSUM_PARTIAL) {
548 th->check = ~tcp_v4_check(skb->len, saddr, daddr, 0);
549 skb->csum_start = skb_transport_header(skb) - skb->head;
550 skb->csum_offset = offsetof(struct tcphdr, check);
551 } else {
552 th->check = tcp_v4_check(skb->len, saddr, daddr,
553 csum_partial(th,
554 th->doff << 2,
555 skb->csum));
556 }
557}
558
559/* This routine computes an IPv4 TCP checksum. */
560void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb)
561{
562 const struct inet_sock *inet = inet_sk(sk);
563
564 __tcp_v4_send_check(skb, inet->inet_saddr, inet->inet_daddr);
565}
566EXPORT_SYMBOL(tcp_v4_send_check);
567
568int tcp_v4_gso_send_check(struct sk_buff *skb)
569{
570 const struct iphdr *iph;
571 struct tcphdr *th;
572
573 if (!pskb_may_pull(skb, sizeof(*th)))
574 return -EINVAL;
575
576 iph = ip_hdr(skb);
577 th = tcp_hdr(skb);
578
579 th->check = 0;
580 skb->ip_summed = CHECKSUM_PARTIAL;
581 __tcp_v4_send_check(skb, iph->saddr, iph->daddr);
582 return 0;
583}
584
585/*
586 * This routine will send an RST to the other tcp.
587 *
588 * Someone asks: why I NEVER use socket parameters (TOS, TTL etc.)
589 * for reset.
590 * Answer: if a packet caused RST, it is not for a socket
591 * existing in our system, if it is matched to a socket,
592 * it is just duplicate segment or bug in other side's TCP.
593 * So that we build reply only basing on parameters
594 * arrived with segment.
595 * Exception: precedence violation. We do not implement it in any case.
596 */
597
598static void tcp_v4_send_reset(struct sock *sk, struct sk_buff *skb)
599{
600 const struct tcphdr *th = tcp_hdr(skb);
601 struct {
602 struct tcphdr th;
603#ifdef CONFIG_TCP_MD5SIG
604 __be32 opt[(TCPOLEN_MD5SIG_ALIGNED >> 2)];
605#endif
606 } rep;
607 struct ip_reply_arg arg;
608#ifdef CONFIG_TCP_MD5SIG
609 struct tcp_md5sig_key *key;
610 const __u8 *hash_location = NULL;
611 unsigned char newhash[16];
612 int genhash;
613 struct sock *sk1 = NULL;
614#endif
615 struct net *net;
616
617 /* Never send a reset in response to a reset. */
618 if (th->rst)
619 return;
620
621 if (skb_rtable(skb)->rt_type != RTN_LOCAL)
622 return;
623
624 /* Swap the send and the receive. */
625 memset(&rep, 0, sizeof(rep));
626 rep.th.dest = th->source;
627 rep.th.source = th->dest;
628 rep.th.doff = sizeof(struct tcphdr) / 4;
629 rep.th.rst = 1;
630
631 if (th->ack) {
632 rep.th.seq = th->ack_seq;
633 } else {
634 rep.th.ack = 1;
635 rep.th.ack_seq = htonl(ntohl(th->seq) + th->syn + th->fin +
636 skb->len - (th->doff << 2));
637 }
638
639 memset(&arg, 0, sizeof(arg));
640 arg.iov[0].iov_base = (unsigned char *)&rep;
641 arg.iov[0].iov_len = sizeof(rep.th);
642
643#ifdef CONFIG_TCP_MD5SIG
644 hash_location = tcp_parse_md5sig_option(th);
645 if (!sk && hash_location) {
646 /*
647 * active side is lost. Try to find listening socket through
648 * source port, and then find md5 key through listening socket.
649 * we are not loose security here:
650 * Incoming packet is checked with md5 hash with finding key,
651 * no RST generated if md5 hash doesn't match.
652 */
653 sk1 = __inet_lookup_listener(dev_net(skb_dst(skb)->dev),
654 &tcp_hashinfo, ip_hdr(skb)->daddr,
655 ntohs(th->source), inet_iif(skb));
656 /* don't send rst if it can't find key */
657 if (!sk1)
658 return;
659 rcu_read_lock();
660 key = tcp_md5_do_lookup(sk1, (union tcp_md5_addr *)
661 &ip_hdr(skb)->saddr, AF_INET);
662 if (!key)
663 goto release_sk1;
664
665 genhash = tcp_v4_md5_hash_skb(newhash, key, NULL, NULL, skb);
666 if (genhash || memcmp(hash_location, newhash, 16) != 0)
667 goto release_sk1;
668 } else {
669 key = sk ? tcp_md5_do_lookup(sk, (union tcp_md5_addr *)
670 &ip_hdr(skb)->saddr,
671 AF_INET) : NULL;
672 }
673
674 if (key) {
675 rep.opt[0] = htonl((TCPOPT_NOP << 24) |
676 (TCPOPT_NOP << 16) |
677 (TCPOPT_MD5SIG << 8) |
678 TCPOLEN_MD5SIG);
679 /* Update length and the length the header thinks exists */
680 arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED;
681 rep.th.doff = arg.iov[0].iov_len / 4;
682
683 tcp_v4_md5_hash_hdr((__u8 *) &rep.opt[1],
684 key, ip_hdr(skb)->saddr,
685 ip_hdr(skb)->daddr, &rep.th);
686 }
687#endif
688 arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr,
689 ip_hdr(skb)->saddr, /* XXX */
690 arg.iov[0].iov_len, IPPROTO_TCP, 0);
691 arg.csumoffset = offsetof(struct tcphdr, check) / 2;
692 arg.flags = (sk && inet_sk(sk)->transparent) ? IP_REPLY_ARG_NOSRCCHECK : 0;
693 /* When socket is gone, all binding information is lost.
694 * routing might fail in this case. using iif for oif to
695 * make sure we can deliver it
696 */
697 arg.bound_dev_if = sk ? sk->sk_bound_dev_if : inet_iif(skb);
698
699 net = dev_net(skb_dst(skb)->dev);
700 arg.tos = ip_hdr(skb)->tos;
701 ip_send_reply(net->ipv4.tcp_sock, skb, ip_hdr(skb)->saddr,
702 &arg, arg.iov[0].iov_len);
703
704 TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
705 TCP_INC_STATS_BH(net, TCP_MIB_OUTRSTS);
706
707#ifdef CONFIG_TCP_MD5SIG
708release_sk1:
709 if (sk1) {
710 rcu_read_unlock();
711 sock_put(sk1);
712 }
713#endif
714}
715
716/* The code following below sending ACKs in SYN-RECV and TIME-WAIT states
717 outside socket context is ugly, certainly. What can I do?
718 */
719
720static void tcp_v4_send_ack(struct sk_buff *skb, u32 seq, u32 ack,
721 u32 win, u32 ts, int oif,
722 struct tcp_md5sig_key *key,
723 int reply_flags, u8 tos)
724{
725 const struct tcphdr *th = tcp_hdr(skb);
726 struct {
727 struct tcphdr th;
728 __be32 opt[(TCPOLEN_TSTAMP_ALIGNED >> 2)
729#ifdef CONFIG_TCP_MD5SIG
730 + (TCPOLEN_MD5SIG_ALIGNED >> 2)
731#endif
732 ];
733 } rep;
734 struct ip_reply_arg arg;
735 struct net *net = dev_net(skb_dst(skb)->dev);
736
737 memset(&rep.th, 0, sizeof(struct tcphdr));
738 memset(&arg, 0, sizeof(arg));
739
740 arg.iov[0].iov_base = (unsigned char *)&rep;
741 arg.iov[0].iov_len = sizeof(rep.th);
742 if (ts) {
743 rep.opt[0] = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
744 (TCPOPT_TIMESTAMP << 8) |
745 TCPOLEN_TIMESTAMP);
746 rep.opt[1] = htonl(tcp_time_stamp);
747 rep.opt[2] = htonl(ts);
748 arg.iov[0].iov_len += TCPOLEN_TSTAMP_ALIGNED;
749 }
750
751 /* Swap the send and the receive. */
752 rep.th.dest = th->source;
753 rep.th.source = th->dest;
754 rep.th.doff = arg.iov[0].iov_len / 4;
755 rep.th.seq = htonl(seq);
756 rep.th.ack_seq = htonl(ack);
757 rep.th.ack = 1;
758 rep.th.window = htons(win);
759
760#ifdef CONFIG_TCP_MD5SIG
761 if (key) {
762 int offset = (ts) ? 3 : 0;
763
764 rep.opt[offset++] = htonl((TCPOPT_NOP << 24) |
765 (TCPOPT_NOP << 16) |
766 (TCPOPT_MD5SIG << 8) |
767 TCPOLEN_MD5SIG);
768 arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED;
769 rep.th.doff = arg.iov[0].iov_len/4;
770
771 tcp_v4_md5_hash_hdr((__u8 *) &rep.opt[offset],
772 key, ip_hdr(skb)->saddr,
773 ip_hdr(skb)->daddr, &rep.th);
774 }
775#endif
776 arg.flags = reply_flags;
777 arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr,
778 ip_hdr(skb)->saddr, /* XXX */
779 arg.iov[0].iov_len, IPPROTO_TCP, 0);
780 arg.csumoffset = offsetof(struct tcphdr, check) / 2;
781 if (oif)
782 arg.bound_dev_if = oif;
783 arg.tos = tos;
784 ip_send_reply(net->ipv4.tcp_sock, skb, ip_hdr(skb)->saddr,
785 &arg, arg.iov[0].iov_len);
786
787 TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
788}
789
790static void tcp_v4_timewait_ack(struct sock *sk, struct sk_buff *skb)
791{
792 struct inet_timewait_sock *tw = inet_twsk(sk);
793 struct tcp_timewait_sock *tcptw = tcp_twsk(sk);
794
795 tcp_v4_send_ack(skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
796 tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
797 tcptw->tw_ts_recent,
798 tw->tw_bound_dev_if,
799 tcp_twsk_md5_key(tcptw),
800 tw->tw_transparent ? IP_REPLY_ARG_NOSRCCHECK : 0,
801 tw->tw_tos
802 );
803
804 inet_twsk_put(tw);
805}
806
807static void tcp_v4_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
808 struct request_sock *req)
809{
810 tcp_v4_send_ack(skb, tcp_rsk(req)->snt_isn + 1,
811 tcp_rsk(req)->rcv_isn + 1, req->rcv_wnd,
812 req->ts_recent,
813 0,
814 tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&ip_hdr(skb)->daddr,
815 AF_INET),
816 inet_rsk(req)->no_srccheck ? IP_REPLY_ARG_NOSRCCHECK : 0,
817 ip_hdr(skb)->tos);
818}
819
820/*
821 * Send a SYN-ACK after having received a SYN.
822 * This still operates on a request_sock only, not on a big
823 * socket.
824 */
825static int tcp_v4_send_synack(struct sock *sk, struct dst_entry *dst,
826 struct request_sock *req,
827 struct request_values *rvp,
828 u16 queue_mapping)
829{
830 const struct inet_request_sock *ireq = inet_rsk(req);
831 struct flowi4 fl4;
832 int err = -1;
833 struct sk_buff * skb;
834
835 /* First, grab a route. */
836 if (!dst && (dst = inet_csk_route_req(sk, &fl4, req)) == NULL)
837 return -1;
838
839 skb = tcp_make_synack(sk, dst, req, rvp);
840
841 if (skb) {
842 __tcp_v4_send_check(skb, ireq->loc_addr, ireq->rmt_addr);
843
844 skb_set_queue_mapping(skb, queue_mapping);
845 err = ip_build_and_send_pkt(skb, sk, ireq->loc_addr,
846 ireq->rmt_addr,
847 ireq->opt);
848 err = net_xmit_eval(err);
849 }
850
851 dst_release(dst);
852 return err;
853}
854
855static int tcp_v4_rtx_synack(struct sock *sk, struct request_sock *req,
856 struct request_values *rvp)
857{
858 TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_RETRANSSEGS);
859 return tcp_v4_send_synack(sk, NULL, req, rvp, 0);
860}
861
862/*
863 * IPv4 request_sock destructor.
864 */
865static void tcp_v4_reqsk_destructor(struct request_sock *req)
866{
867 kfree(inet_rsk(req)->opt);
868}
869
870/*
871 * Return true if a syncookie should be sent
872 */
873bool tcp_syn_flood_action(struct sock *sk,
874 const struct sk_buff *skb,
875 const char *proto)
876{
877 const char *msg = "Dropping request";
878 bool want_cookie = false;
879 struct listen_sock *lopt;
880
881
882
883#ifdef CONFIG_SYN_COOKIES
884 if (sysctl_tcp_syncookies) {
885 msg = "Sending cookies";
886 want_cookie = true;
887 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPREQQFULLDOCOOKIES);
888 } else
889#endif
890 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPREQQFULLDROP);
891
892 lopt = inet_csk(sk)->icsk_accept_queue.listen_opt;
893 if (!lopt->synflood_warned) {
894 lopt->synflood_warned = 1;
895 pr_info("%s: Possible SYN flooding on port %d. %s. Check SNMP counters.\n",
896 proto, ntohs(tcp_hdr(skb)->dest), msg);
897 }
898 return want_cookie;
899}
900EXPORT_SYMBOL(tcp_syn_flood_action);
901
902/*
903 * Save and compile IPv4 options into the request_sock if needed.
904 */
905static struct ip_options_rcu *tcp_v4_save_options(struct sock *sk,
906 struct sk_buff *skb)
907{
908 const struct ip_options *opt = &(IPCB(skb)->opt);
909 struct ip_options_rcu *dopt = NULL;
910
911 if (opt && opt->optlen) {
912 int opt_size = sizeof(*dopt) + opt->optlen;
913
914 dopt = kmalloc(opt_size, GFP_ATOMIC);
915 if (dopt) {
916 if (ip_options_echo(&dopt->opt, skb)) {
917 kfree(dopt);
918 dopt = NULL;
919 }
920 }
921 }
922 return dopt;
923}
924
925#ifdef CONFIG_TCP_MD5SIG
926/*
927 * RFC2385 MD5 checksumming requires a mapping of
928 * IP address->MD5 Key.
929 * We need to maintain these in the sk structure.
930 */
931
932/* Find the Key structure for an address. */
933struct tcp_md5sig_key *tcp_md5_do_lookup(struct sock *sk,
934 const union tcp_md5_addr *addr,
935 int family)
936{
937 struct tcp_sock *tp = tcp_sk(sk);
938 struct tcp_md5sig_key *key;
939 struct hlist_node *pos;
940 unsigned int size = sizeof(struct in_addr);
941 struct tcp_md5sig_info *md5sig;
942
943 /* caller either holds rcu_read_lock() or socket lock */
944 md5sig = rcu_dereference_check(tp->md5sig_info,
945 sock_owned_by_user(sk) ||
946 lockdep_is_held(&sk->sk_lock.slock));
947 if (!md5sig)
948 return NULL;
949#if IS_ENABLED(CONFIG_IPV6)
950 if (family == AF_INET6)
951 size = sizeof(struct in6_addr);
952#endif
953 hlist_for_each_entry_rcu(key, pos, &md5sig->head, node) {
954 if (key->family != family)
955 continue;
956 if (!memcmp(&key->addr, addr, size))
957 return key;
958 }
959 return NULL;
960}
961EXPORT_SYMBOL(tcp_md5_do_lookup);
962
963struct tcp_md5sig_key *tcp_v4_md5_lookup(struct sock *sk,
964 struct sock *addr_sk)
965{
966 union tcp_md5_addr *addr;
967
968 addr = (union tcp_md5_addr *)&inet_sk(addr_sk)->inet_daddr;
969 return tcp_md5_do_lookup(sk, addr, AF_INET);
970}
971EXPORT_SYMBOL(tcp_v4_md5_lookup);
972
973static struct tcp_md5sig_key *tcp_v4_reqsk_md5_lookup(struct sock *sk,
974 struct request_sock *req)
975{
976 union tcp_md5_addr *addr;
977
978 addr = (union tcp_md5_addr *)&inet_rsk(req)->rmt_addr;
979 return tcp_md5_do_lookup(sk, addr, AF_INET);
980}
981
982/* This can be called on a newly created socket, from other files */
983int tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr,
984 int family, const u8 *newkey, u8 newkeylen, gfp_t gfp)
985{
986 /* Add Key to the list */
987 struct tcp_md5sig_key *key;
988 struct tcp_sock *tp = tcp_sk(sk);
989 struct tcp_md5sig_info *md5sig;
990
991 key = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&addr, AF_INET);
992 if (key) {
993 /* Pre-existing entry - just update that one. */
994 memcpy(key->key, newkey, newkeylen);
995 key->keylen = newkeylen;
996 return 0;
997 }
998
999 md5sig = rcu_dereference_protected(tp->md5sig_info,
1000 sock_owned_by_user(sk));
1001 if (!md5sig) {
1002 md5sig = kmalloc(sizeof(*md5sig), gfp);
1003 if (!md5sig)
1004 return -ENOMEM;
1005
1006 sk_nocaps_add(sk, NETIF_F_GSO_MASK);
1007 INIT_HLIST_HEAD(&md5sig->head);
1008 rcu_assign_pointer(tp->md5sig_info, md5sig);
1009 }
1010
1011 key = sock_kmalloc(sk, sizeof(*key), gfp);
1012 if (!key)
1013 return -ENOMEM;
1014 if (hlist_empty(&md5sig->head) && !tcp_alloc_md5sig_pool(sk)) {
1015 sock_kfree_s(sk, key, sizeof(*key));
1016 return -ENOMEM;
1017 }
1018
1019 memcpy(key->key, newkey, newkeylen);
1020 key->keylen = newkeylen;
1021 key->family = family;
1022 memcpy(&key->addr, addr,
1023 (family == AF_INET6) ? sizeof(struct in6_addr) :
1024 sizeof(struct in_addr));
1025 hlist_add_head_rcu(&key->node, &md5sig->head);
1026 return 0;
1027}
1028EXPORT_SYMBOL(tcp_md5_do_add);
1029
1030int tcp_md5_do_del(struct sock *sk, const union tcp_md5_addr *addr, int family)
1031{
1032 struct tcp_sock *tp = tcp_sk(sk);
1033 struct tcp_md5sig_key *key;
1034 struct tcp_md5sig_info *md5sig;
1035
1036 key = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&addr, AF_INET);
1037 if (!key)
1038 return -ENOENT;
1039 hlist_del_rcu(&key->node);
1040 atomic_sub(sizeof(*key), &sk->sk_omem_alloc);
1041 kfree_rcu(key, rcu);
1042 md5sig = rcu_dereference_protected(tp->md5sig_info,
1043 sock_owned_by_user(sk));
1044 if (hlist_empty(&md5sig->head))
1045 tcp_free_md5sig_pool();
1046 return 0;
1047}
1048EXPORT_SYMBOL(tcp_md5_do_del);
1049
1050void tcp_clear_md5_list(struct sock *sk)
1051{
1052 struct tcp_sock *tp = tcp_sk(sk);
1053 struct tcp_md5sig_key *key;
1054 struct hlist_node *pos, *n;
1055 struct tcp_md5sig_info *md5sig;
1056
1057 md5sig = rcu_dereference_protected(tp->md5sig_info, 1);
1058
1059 if (!hlist_empty(&md5sig->head))
1060 tcp_free_md5sig_pool();
1061 hlist_for_each_entry_safe(key, pos, n, &md5sig->head, node) {
1062 hlist_del_rcu(&key->node);
1063 atomic_sub(sizeof(*key), &sk->sk_omem_alloc);
1064 kfree_rcu(key, rcu);
1065 }
1066}
1067
1068static int tcp_v4_parse_md5_keys(struct sock *sk, char __user *optval,
1069 int optlen)
1070{
1071 struct tcp_md5sig cmd;
1072 struct sockaddr_in *sin = (struct sockaddr_in *)&cmd.tcpm_addr;
1073
1074 if (optlen < sizeof(cmd))
1075 return -EINVAL;
1076
1077 if (copy_from_user(&cmd, optval, sizeof(cmd)))
1078 return -EFAULT;
1079
1080 if (sin->sin_family != AF_INET)
1081 return -EINVAL;
1082
1083 if (!cmd.tcpm_key || !cmd.tcpm_keylen)
1084 return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin->sin_addr.s_addr,
1085 AF_INET);
1086
1087 if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
1088 return -EINVAL;
1089
1090 return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin->sin_addr.s_addr,
1091 AF_INET, cmd.tcpm_key, cmd.tcpm_keylen,
1092 GFP_KERNEL);
1093}
1094
1095static int tcp_v4_md5_hash_pseudoheader(struct tcp_md5sig_pool *hp,
1096 __be32 daddr, __be32 saddr, int nbytes)
1097{
1098 struct tcp4_pseudohdr *bp;
1099 struct scatterlist sg;
1100
1101 bp = &hp->md5_blk.ip4;
1102
1103 /*
1104 * 1. the TCP pseudo-header (in the order: source IP address,
1105 * destination IP address, zero-padded protocol number, and
1106 * segment length)
1107 */
1108 bp->saddr = saddr;
1109 bp->daddr = daddr;
1110 bp->pad = 0;
1111 bp->protocol = IPPROTO_TCP;
1112 bp->len = cpu_to_be16(nbytes);
1113
1114 sg_init_one(&sg, bp, sizeof(*bp));
1115 return crypto_hash_update(&hp->md5_desc, &sg, sizeof(*bp));
1116}
1117
1118static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
1119 __be32 daddr, __be32 saddr, const struct tcphdr *th)
1120{
1121 struct tcp_md5sig_pool *hp;
1122 struct hash_desc *desc;
1123
1124 hp = tcp_get_md5sig_pool();
1125 if (!hp)
1126 goto clear_hash_noput;
1127 desc = &hp->md5_desc;
1128
1129 if (crypto_hash_init(desc))
1130 goto clear_hash;
1131 if (tcp_v4_md5_hash_pseudoheader(hp, daddr, saddr, th->doff << 2))
1132 goto clear_hash;
1133 if (tcp_md5_hash_header(hp, th))
1134 goto clear_hash;
1135 if (tcp_md5_hash_key(hp, key))
1136 goto clear_hash;
1137 if (crypto_hash_final(desc, md5_hash))
1138 goto clear_hash;
1139
1140 tcp_put_md5sig_pool();
1141 return 0;
1142
1143clear_hash:
1144 tcp_put_md5sig_pool();
1145clear_hash_noput:
1146 memset(md5_hash, 0, 16);
1147 return 1;
1148}
1149
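/* Compute the MD5 signature for a whole segment. The addresses come from the
 * full socket when one exists, from the request sock during the handshake, or
 * straight from the IP header when we are only validating a received skb (as
 * in tcp_v4_inbound_md5_hash() below).
 */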
1150int tcp_v4_md5_hash_skb(char *md5_hash, struct tcp_md5sig_key *key,
1151 const struct sock *sk, const struct request_sock *req,
1152 const struct sk_buff *skb)
1153{
1154 struct tcp_md5sig_pool *hp;
1155 struct hash_desc *desc;
1156 const struct tcphdr *th = tcp_hdr(skb);
1157 __be32 saddr, daddr;
1158
1159 if (sk) {
1160 saddr = inet_sk(sk)->inet_saddr;
1161 daddr = inet_sk(sk)->inet_daddr;
1162 } else if (req) {
1163 saddr = inet_rsk(req)->loc_addr;
1164 daddr = inet_rsk(req)->rmt_addr;
1165 } else {
1166 const struct iphdr *iph = ip_hdr(skb);
1167 saddr = iph->saddr;
1168 daddr = iph->daddr;
1169 }
1170
1171 hp = tcp_get_md5sig_pool();
1172 if (!hp)
1173 goto clear_hash_noput;
1174 desc = &hp->md5_desc;
1175
1176 if (crypto_hash_init(desc))
1177 goto clear_hash;
1178
1179 if (tcp_v4_md5_hash_pseudoheader(hp, daddr, saddr, skb->len))
1180 goto clear_hash;
1181 if (tcp_md5_hash_header(hp, th))
1182 goto clear_hash;
1183 if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2))
1184 goto clear_hash;
1185 if (tcp_md5_hash_key(hp, key))
1186 goto clear_hash;
1187 if (crypto_hash_final(desc, md5_hash))
1188 goto clear_hash;
1189
1190 tcp_put_md5sig_pool();
1191 return 0;
1192
1193clear_hash:
1194 tcp_put_md5sig_pool();
1195clear_hash_noput:
1196 memset(md5_hash, 0, 16);
1197 return 1;
1198}
1199EXPORT_SYMBOL(tcp_v4_md5_hash_skb);
1200
1201static bool tcp_v4_inbound_md5_hash(struct sock *sk, const struct sk_buff *skb)
1202{
1203 /*
1204 * This gets called for each TCP segment that arrives
1205 * so we want to be efficient.
1206 * We have 3 drop cases:
1207 * o No MD5 hash and one expected.
1208 * o MD5 hash and we're not expecting one.
1209 * o MD5 hash and it's wrong.
1210 */
1211 const __u8 *hash_location = NULL;
1212 struct tcp_md5sig_key *hash_expected;
1213 const struct iphdr *iph = ip_hdr(skb);
1214 const struct tcphdr *th = tcp_hdr(skb);
1215 int genhash;
1216 unsigned char newhash[16];
1217
1218 hash_expected = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&iph->saddr,
1219 AF_INET);
1220 hash_location = tcp_parse_md5sig_option(th);
1221
1222 /* We've parsed the options - do we have a hash? */
1223 if (!hash_expected && !hash_location)
1224 return false;
1225
1226 if (hash_expected && !hash_location) {
1227 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
1228 return true;
1229 }
1230
1231 if (!hash_expected && hash_location) {
1232 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
1233 return true;
1234 }
1235
1236 /* Okay, so we have both hash_expected and hash_location -
1237 * so we need to calculate the MD5 hash and compare.
1238 */
1239 genhash = tcp_v4_md5_hash_skb(newhash,
1240 hash_expected,
1241 NULL, NULL, skb);
1242
1243 if (genhash || memcmp(hash_location, newhash, 16) != 0) {
1244 net_info_ratelimited("MD5 Hash failed for (%pI4, %d)->(%pI4, %d)%s\n",
1245 &iph->saddr, ntohs(th->source),
1246 &iph->daddr, ntohs(th->dest),
1247 genhash ? " tcp_v4_calc_md5_hash failed"
1248 : "");
1249 return true;
1250 }
1251 return false;
1252}
1253
1254#endif
1255
1256struct request_sock_ops tcp_request_sock_ops __read_mostly = {
1257 .family = PF_INET,
1258 .obj_size = sizeof(struct tcp_request_sock),
1259 .rtx_syn_ack = tcp_v4_rtx_synack,
1260 .send_ack = tcp_v4_reqsk_send_ack,
1261 .destructor = tcp_v4_reqsk_destructor,
1262 .send_reset = tcp_v4_send_reset,
1263 .syn_ack_timeout = tcp_syn_ack_timeout,
1264};
1265
1266#ifdef CONFIG_TCP_MD5SIG
1267static const struct tcp_request_sock_ops tcp_request_sock_ipv4_ops = {
1268 .md5_lookup = tcp_v4_reqsk_md5_lookup,
1269 .calc_md5_hash = tcp_v4_md5_hash_skb,
1270};
1271#endif
1272
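/* Listener-side SYN processing: refuse broadcast/multicast SYNs, fall back to
 * syncookies (or drop) when the request queue is full, allocate and fill a
 * request_sock from the parsed options, run the PAWS check against the cached
 * per-destination timestamp when tw_recycle is enabled, pick an initial
 * sequence number, and finally send the SYN-ACK and hash the request into the
 * SYN table.
 */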
1273int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
1274{
1275 struct tcp_extend_values tmp_ext;
1276 struct tcp_options_received tmp_opt;
1277 const u8 *hash_location;
1278 struct request_sock *req;
1279 struct inet_request_sock *ireq;
1280 struct tcp_sock *tp = tcp_sk(sk);
1281 struct dst_entry *dst = NULL;
1282 __be32 saddr = ip_hdr(skb)->saddr;
1283 __be32 daddr = ip_hdr(skb)->daddr;
1284 __u32 isn = TCP_SKB_CB(skb)->when;
1285 bool want_cookie = false;
1286
1287 /* Never answer SYNs sent to broadcast or multicast addresses */
1288 if (skb_rtable(skb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))
1289 goto drop;
1290
1291 /* TW buckets are converted to open requests without
1292 * limitation: they conserve resources and the peer is
1293 * evidently a real one.
1294 */
1295 if (inet_csk_reqsk_queue_is_full(sk) && !isn) {
1296 want_cookie = tcp_syn_flood_action(sk, skb, "TCP");
1297 if (!want_cookie)
1298 goto drop;
1299 }
1300
1301 /* The accept backlog is full. If we have already queued enough
1302 * warm entries in the SYN queue, drop this request. That is better
1303 * than clogging the SYN queue with open requests whose timeouts
1304 * increase exponentially.
1305 */
1306 if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1)
1307 goto drop;
1308
1309 req = inet_reqsk_alloc(&tcp_request_sock_ops);
1310 if (!req)
1311 goto drop;
1312
1313#ifdef CONFIG_TCP_MD5SIG
1314 tcp_rsk(req)->af_specific = &tcp_request_sock_ipv4_ops;
1315#endif
1316
1317 tcp_clear_options(&tmp_opt);
1318 tmp_opt.mss_clamp = TCP_MSS_DEFAULT;
1319 tmp_opt.user_mss = tp->rx_opt.user_mss;
1320 tcp_parse_options(skb, &tmp_opt, &hash_location, 0);
1321
1322 if (tmp_opt.cookie_plus > 0 &&
1323 tmp_opt.saw_tstamp &&
1324 !tp->rx_opt.cookie_out_never &&
1325 (sysctl_tcp_cookie_size > 0 ||
1326 (tp->cookie_values != NULL &&
1327 tp->cookie_values->cookie_desired > 0))) {
1328 u8 *c;
1329 u32 *mess = &tmp_ext.cookie_bakery[COOKIE_DIGEST_WORDS];
1330 int l = tmp_opt.cookie_plus - TCPOLEN_COOKIE_BASE;
1331
1332 if (tcp_cookie_generator(&tmp_ext.cookie_bakery[0]) != 0)
1333 goto drop_and_release;
1334
1335 /* Secret recipe starts with IP addresses */
1336 *mess++ ^= (__force u32)daddr;
1337 *mess++ ^= (__force u32)saddr;
1338
1339 /* plus variable length Initiator Cookie */
1340 c = (u8 *)mess;
1341 while (l-- > 0)
1342 *c++ ^= *hash_location++;
1343
1344 want_cookie = false; /* not our kind of cookie */
1345 tmp_ext.cookie_out_never = 0; /* false */
1346 tmp_ext.cookie_plus = tmp_opt.cookie_plus;
1347 } else if (!tp->rx_opt.cookie_in_always) {
1348 /* redundant indications, but ensure initialization. */
1349 tmp_ext.cookie_out_never = 1; /* true */
1350 tmp_ext.cookie_plus = 0;
1351 } else {
1352 goto drop_and_release;
1353 }
1354 tmp_ext.cookie_in_always = tp->rx_opt.cookie_in_always;
1355
1356 if (want_cookie && !tmp_opt.saw_tstamp)
1357 tcp_clear_options(&tmp_opt);
1358
1359 tmp_opt.tstamp_ok = tmp_opt.saw_tstamp;
1360 tcp_openreq_init(req, &tmp_opt, skb);
1361
1362 ireq = inet_rsk(req);
1363 ireq->loc_addr = daddr;
1364 ireq->rmt_addr = saddr;
1365 ireq->no_srccheck = inet_sk(sk)->transparent;
1366 ireq->opt = tcp_v4_save_options(sk, skb);
1367
1368 if (security_inet_conn_request(sk, skb, req))
1369 goto drop_and_free;
1370
1371 if (!want_cookie || tmp_opt.tstamp_ok)
1372 TCP_ECN_create_request(req, skb);
1373
1374 if (want_cookie) {
1375 isn = cookie_v4_init_sequence(sk, skb, &req->mss);
1376 req->cookie_ts = tmp_opt.tstamp_ok;
1377 } else if (!isn) {
1378 struct inet_peer *peer = NULL;
1379 struct flowi4 fl4;
1380
1381 /* VJ's idea: we save the last timestamp seen
1382 * from the destination in the peer table when entering
1383 * TIME-WAIT state, and check against it before
1384 * accepting a new connection request.
1385 *
1386 * If "isn" is non-zero, this request hit a live
1387 * TIME-WAIT bucket, so all the necessary checks
1388 * were already made while processing the TIME-WAIT state.
1389 */
1390 if (tmp_opt.saw_tstamp &&
1391 tcp_death_row.sysctl_tw_recycle &&
1392 (dst = inet_csk_route_req(sk, &fl4, req)) != NULL &&
1393 fl4.daddr == saddr &&
1394 (peer = rt_get_peer((struct rtable *)dst, fl4.daddr)) != NULL) {
1395 inet_peer_refcheck(peer);
1396 if ((u32)get_seconds() - peer->tcp_ts_stamp < TCP_PAWS_MSL &&
1397 (s32)(peer->tcp_ts - req->ts_recent) >
1398 TCP_PAWS_WINDOW) {
1399 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSPASSIVEREJECTED);
1400 goto drop_and_release;
1401 }
1402 }
1403 /* Kill the following clause, if you dislike this way. */
1404 else if (!sysctl_tcp_syncookies &&
1405 (sysctl_max_syn_backlog - inet_csk_reqsk_queue_len(sk) <
1406 (sysctl_max_syn_backlog >> 2)) &&
1407 (!peer || !peer->tcp_ts_stamp) &&
1408 (!dst || !dst_metric(dst, RTAX_RTT))) {
1409 /* Without syncookies, the last quarter of
1410 * the backlog is reserved for destinations
1411 * proven to be alive.
1412 * This means that under a SYN flood we keep
1413 * communicating only with destinations we
1414 * already remembered before the flood began.
1415 */
1416 LIMIT_NETDEBUG(KERN_DEBUG pr_fmt("drop open request from %pI4/%u\n"),
1417 &saddr, ntohs(tcp_hdr(skb)->source));
1418 goto drop_and_release;
1419 }
1420
1421 isn = tcp_v4_init_sequence(skb);
1422 }
1423 tcp_rsk(req)->snt_isn = isn;
1424 tcp_rsk(req)->snt_synack = tcp_time_stamp;
1425
1426 if (tcp_v4_send_synack(sk, dst, req,
1427 (struct request_values *)&tmp_ext,
1428 skb_get_queue_mapping(skb)) ||
1429 want_cookie)
1430 goto drop_and_free;
1431
1432 inet_csk_reqsk_queue_hash_add(sk, req, TCP_TIMEOUT_INIT);
1433 return 0;
1434
1435drop_and_release:
1436 dst_release(dst);
1437drop_and_free:
1438 reqsk_free(req);
1439drop:
1440 return 0;
1441}
1442EXPORT_SYMBOL(tcp_v4_conn_request);
1443
1444
1445/*
1446 * The three-way handshake has completed - we received a valid final ACK -
1447 * now create the new socket.
1448 */
1449struct sock *tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
1450 struct request_sock *req,
1451 struct dst_entry *dst)
1452{
1453 struct inet_request_sock *ireq;
1454 struct inet_sock *newinet;
1455 struct tcp_sock *newtp;
1456 struct sock *newsk;
1457#ifdef CONFIG_TCP_MD5SIG
1458 struct tcp_md5sig_key *key;
1459#endif
1460 struct ip_options_rcu *inet_opt;
1461
1462 if (sk_acceptq_is_full(sk))
1463 goto exit_overflow;
1464
1465 newsk = tcp_create_openreq_child(sk, req, skb);
1466 if (!newsk)
1467 goto exit_nonewsk;
1468
1469 newsk->sk_gso_type = SKB_GSO_TCPV4;
1470
1471 newtp = tcp_sk(newsk);
1472 newinet = inet_sk(newsk);
1473 ireq = inet_rsk(req);
1474 newinet->inet_daddr = ireq->rmt_addr;
1475 newinet->inet_rcv_saddr = ireq->loc_addr;
1476 newinet->inet_saddr = ireq->loc_addr;
1477 inet_opt = ireq->opt;
1478 rcu_assign_pointer(newinet->inet_opt, inet_opt);
1479 ireq->opt = NULL;
1480 newinet->mc_index = inet_iif(skb);
1481 newinet->mc_ttl = ip_hdr(skb)->ttl;
1482 newinet->rcv_tos = ip_hdr(skb)->tos;
1483 inet_csk(newsk)->icsk_ext_hdr_len = 0;
1484 if (inet_opt)
1485 inet_csk(newsk)->icsk_ext_hdr_len = inet_opt->opt.optlen;
1486 newinet->inet_id = newtp->write_seq ^ jiffies;
1487
1488 if (!dst) {
1489 dst = inet_csk_route_child_sock(sk, newsk, req);
1490 if (!dst)
1491 goto put_and_exit;
1492 } else {
1493 /* syncookie case : see end of cookie_v4_check() */
1494 }
1495 sk_setup_caps(newsk, dst);
1496
1497 tcp_mtup_init(newsk);
1498 tcp_sync_mss(newsk, dst_mtu(dst));
1499 newtp->advmss = dst_metric_advmss(dst);
1500 if (tcp_sk(sk)->rx_opt.user_mss &&
1501 tcp_sk(sk)->rx_opt.user_mss < newtp->advmss)
1502 newtp->advmss = tcp_sk(sk)->rx_opt.user_mss;
1503
1504 tcp_initialize_rcv_mss(newsk);
1505 if (tcp_rsk(req)->snt_synack)
1506 tcp_valid_rtt_meas(newsk,
1507 tcp_time_stamp - tcp_rsk(req)->snt_synack);
1508 newtp->total_retrans = req->retrans;
1509
1510#ifdef CONFIG_TCP_MD5SIG
1511 /* Copy over the MD5 key from the original socket */
1512 key = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&newinet->inet_daddr,
1513 AF_INET);
1514 if (key != NULL) {
1515 /*
1516 * We're using one, so create a matching key
1517 * on the newsk structure. If we fail to get
1518 * memory, then we end up not copying the key
1519 * across. Shucks.
1520 */
1521 tcp_md5_do_add(newsk, (union tcp_md5_addr *)&newinet->inet_daddr,
1522 AF_INET, key->key, key->keylen, GFP_ATOMIC);
1523 sk_nocaps_add(newsk, NETIF_F_GSO_MASK);
1524 }
1525#endif
1526
1527 if (__inet_inherit_port(sk, newsk) < 0)
1528 goto put_and_exit;
1529 __inet_hash_nolisten(newsk, NULL);
1530
1531 return newsk;
1532
1533exit_overflow:
1534 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
1535exit_nonewsk:
1536 dst_release(dst);
1537exit:
1538 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
1539 return NULL;
1540put_and_exit:
1541 tcp_clear_xmit_timers(newsk);
1542 tcp_cleanup_congestion_control(newsk);
1543 bh_unlock_sock(newsk);
1544 sock_put(newsk);
1545 goto exit;
1546}
1547EXPORT_SYMBOL(tcp_v4_syn_recv_sock);
1548
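/* For a segment that arrives on a listening socket, first look for a matching
 * open request in the SYN table, then for an already established (or
 * TIME-WAIT) socket with the same four-tuple, and finally, for a non-SYN
 * segment, let syncookies try to reconstruct a dropped request.
 */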
1549static struct sock *tcp_v4_hnd_req(struct sock *sk, struct sk_buff *skb)
1550{
1551 struct tcphdr *th = tcp_hdr(skb);
1552 const struct iphdr *iph = ip_hdr(skb);
1553 struct sock *nsk;
1554 struct request_sock **prev;
1555 /* Find possible connection requests. */
1556 struct request_sock *req = inet_csk_search_req(sk, &prev, th->source,
1557 iph->saddr, iph->daddr);
1558 if (req)
1559 return tcp_check_req(sk, skb, req, prev);
1560
1561 nsk = inet_lookup_established(sock_net(sk), &tcp_hashinfo, iph->saddr,
1562 th->source, iph->daddr, th->dest, inet_iif(skb));
1563
1564 if (nsk) {
1565 if (nsk->sk_state != TCP_TIME_WAIT) {
1566 bh_lock_sock(nsk);
1567 return nsk;
1568 }
1569 inet_twsk_put(inet_twsk(nsk));
1570 return NULL;
1571 }
1572
1573#ifdef CONFIG_SYN_COOKIES
1574 if (!th->syn)
1575 sk = cookie_v4_check(sk, skb, &(IPCB(skb)->opt));
1576#endif
1577 return sk;
1578}
1579
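/* Checksum strategy on receive: trust a hardware CHECKSUM_COMPLETE value if
 * it verifies, otherwise seed skb->csum with the pseudo-header sum and verify
 * short packets (<= 76 bytes) immediately; longer packets are checked later,
 * once header prediction has had a chance to look at them.
 */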
1580static __sum16 tcp_v4_checksum_init(struct sk_buff *skb)
1581{
1582 const struct iphdr *iph = ip_hdr(skb);
1583
1584 if (skb->ip_summed == CHECKSUM_COMPLETE) {
1585 if (!tcp_v4_check(skb->len, iph->saddr,
1586 iph->daddr, skb->csum)) {
1587 skb->ip_summed = CHECKSUM_UNNECESSARY;
1588 return 0;
1589 }
1590 }
1591
1592 skb->csum = csum_tcpudp_nofold(iph->saddr, iph->daddr,
1593 skb->len, IPPROTO_TCP, 0);
1594
1595 if (skb->len <= 76) {
1596 return __skb_checksum_complete(skb);
1597 }
1598 return 0;
1599}
1600
1601
1602/* The socket must have its spinlock held when we get
1603 * here.
1604 *
1605 * We have a potential double-lock case here, so even when
1606 * doing backlog processing we use the BH locking scheme.
1607 * This is because we cannot sleep with the original spinlock
1608 * held.
1609 */
1610int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
1611{
1612 struct sock *rsk;
1613#ifdef CONFIG_TCP_MD5SIG
1614 /*
1615 * We really want to reject the packet as early as possible
1616 * if:
1617 * o We're expecting an MD5-signed packet and there is no MD5 TCP option
1618 * o There is an MD5 option and we're not expecting one
1619 */
1620 if (tcp_v4_inbound_md5_hash(sk, skb))
1621 goto discard;
1622#endif
1623
1624 if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
1625 sock_rps_save_rxhash(sk, skb);
1626 if (tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len)) {
1627 rsk = sk;
1628 goto reset;
1629 }
1630 return 0;
1631 }
1632
1633 if (skb->len < tcp_hdrlen(skb) || tcp_checksum_complete(skb))
1634 goto csum_err;
1635
1636 if (sk->sk_state == TCP_LISTEN) {
1637 struct sock *nsk = tcp_v4_hnd_req(sk, skb);
1638 if (!nsk)
1639 goto discard;
1640
1641 if (nsk != sk) {
1642 sock_rps_save_rxhash(nsk, skb);
1643 if (tcp_child_process(sk, nsk, skb)) {
1644 rsk = nsk;
1645 goto reset;
1646 }
1647 return 0;
1648 }
1649 } else
1650 sock_rps_save_rxhash(sk, skb);
1651
1652 if (tcp_rcv_state_process(sk, skb, tcp_hdr(skb), skb->len)) {
1653 rsk = sk;
1654 goto reset;
1655 }
1656 return 0;
1657
1658reset:
1659 tcp_v4_send_reset(rsk, skb);
1660discard:
1661 kfree_skb(skb);
1662 /* Be careful here. If this function gets more complicated and
1663 * gcc suffers from register pressure on the x86, sk (in %ebx)
1664 * might be destroyed here. This current version compiles correctly,
1665 * but you have been warned.
1666 */
1667 return 0;
1668
1669csum_err:
1670 TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS);
1671 goto discard;
1672}
1673EXPORT_SYMBOL(tcp_v4_do_rcv);
1674
1675/*
1676 * From tcp_input.c
1677 */
1678
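/* Main IPv4 receive entry point, called from the IP layer for every TCP
 * segment: validate the header and checksum, look the socket up in the
 * established/listening hashes, apply TTL, XFRM policy and socket filter
 * checks, and then process the segment directly, via the prequeue, or via the
 * socket backlog, depending on whether a user context currently owns the
 * socket.
 */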
1679int tcp_v4_rcv(struct sk_buff *skb)
1680{
1681 const struct iphdr *iph;
1682 const struct tcphdr *th;
1683 struct sock *sk;
1684 int ret;
1685 struct net *net = dev_net(skb->dev);
1686
1687 if (skb->pkt_type != PACKET_HOST)
1688 goto discard_it;
1689
1690 /* Count it even if it's bad */
1691 TCP_INC_STATS_BH(net, TCP_MIB_INSEGS);
1692
1693 if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
1694 goto discard_it;
1695
1696 th = tcp_hdr(skb);
1697
1698 if (th->doff < sizeof(struct tcphdr) / 4)
1699 goto bad_packet;
1700 if (!pskb_may_pull(skb, th->doff * 4))
1701 goto discard_it;
1702
1703 /* An explanation is required here, I think.
1704 * Packet length and doff are validated by header prediction,
1705 * provided the case of th->doff==0 is eliminated.
1706 * So, we defer the checks. */
1707 if (!skb_csum_unnecessary(skb) && tcp_v4_checksum_init(skb))
1708 goto bad_packet;
1709
1710 th = tcp_hdr(skb);
1711 iph = ip_hdr(skb);
1712 TCP_SKB_CB(skb)->seq = ntohl(th->seq);
1713 TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
1714 skb->len - th->doff * 4);
1715 TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
1716 TCP_SKB_CB(skb)->when = 0;
1717 TCP_SKB_CB(skb)->ip_dsfield = ipv4_get_dsfield(iph);
1718 TCP_SKB_CB(skb)->sacked = 0;
1719
1720 sk = __inet_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
1721 if (!sk)
1722 goto no_tcp_socket;
1723
1724process:
1725 if (sk->sk_state == TCP_TIME_WAIT)
1726 goto do_time_wait;
1727
1728 if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
1729 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
1730 goto discard_and_relse;
1731 }
1732
1733 if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
1734 goto discard_and_relse;
1735 nf_reset(skb);
1736
1737 if (sk_filter(sk, skb))
1738 goto discard_and_relse;
1739
1740 skb->dev = NULL;
1741
1742 bh_lock_sock_nested(sk);
1743 ret = 0;
1744 if (!sock_owned_by_user(sk)) {
1745#ifdef CONFIG_NET_DMA
1746 struct tcp_sock *tp = tcp_sk(sk);
1747 if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
1748 tp->ucopy.dma_chan = net_dma_find_channel();
1749 if (tp->ucopy.dma_chan)
1750 ret = tcp_v4_do_rcv(sk, skb);
1751 else
1752#endif
1753 {
1754 if (!tcp_prequeue(sk, skb))
1755 ret = tcp_v4_do_rcv(sk, skb);
1756 }
1757 } else if (unlikely(sk_add_backlog(sk, skb,
1758 sk->sk_rcvbuf + sk->sk_sndbuf))) {
1759 bh_unlock_sock(sk);
1760 NET_INC_STATS_BH(net, LINUX_MIB_TCPBACKLOGDROP);
1761 goto discard_and_relse;
1762 }
1763 bh_unlock_sock(sk);
1764
1765 sock_put(sk);
1766
1767 return ret;
1768
1769no_tcp_socket:
1770 if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
1771 goto discard_it;
1772
1773 if (skb->len < (th->doff << 2) || tcp_checksum_complete(skb)) {
1774bad_packet:
1775 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
1776 } else {
1777 tcp_v4_send_reset(NULL, skb);
1778 }
1779
1780discard_it:
1781 /* Discard frame. */
1782 kfree_skb(skb);
1783 return 0;
1784
1785discard_and_relse:
1786 sock_put(sk);
1787 goto discard_it;
1788
1789do_time_wait:
1790 if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) {
1791 inet_twsk_put(inet_twsk(sk));
1792 goto discard_it;
1793 }
1794
1795 if (skb->len < (th->doff << 2) || tcp_checksum_complete(skb)) {
1796 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
1797 inet_twsk_put(inet_twsk(sk));
1798 goto discard_it;
1799 }
1800 switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
1801 case TCP_TW_SYN: {
1802 struct sock *sk2 = inet_lookup_listener(dev_net(skb->dev),
1803 &tcp_hashinfo,
1804 iph->daddr, th->dest,
1805 inet_iif(skb));
1806 if (sk2) {
1807 inet_twsk_deschedule(inet_twsk(sk), &tcp_death_row);
1808 inet_twsk_put(inet_twsk(sk));
1809 sk = sk2;
1810 goto process;
1811 }
1812 /* Fall through to ACK */
1813 }
1814 case TCP_TW_ACK:
1815 tcp_v4_timewait_ack(sk, skb);
1816 break;
1817 case TCP_TW_RST:
1818 goto no_tcp_socket;
1819 case TCP_TW_SUCCESS:;
1820 }
1821 goto discard_it;
1822}
1823
1824struct inet_peer *tcp_v4_get_peer(struct sock *sk, bool *release_it)
1825{
1826 struct rtable *rt = (struct rtable *) __sk_dst_get(sk);
1827 struct inet_sock *inet = inet_sk(sk);
1828 struct inet_peer *peer;
1829
1830 if (!rt ||
1831 inet->cork.fl.u.ip4.daddr != inet->inet_daddr) {
1832 peer = inet_getpeer_v4(inet->inet_daddr, 1);
1833 *release_it = true;
1834 } else {
1835 if (!rt->peer)
1836 rt_bind_peer(rt, inet->inet_daddr, 1);
1837 peer = rt->peer;
1838 *release_it = false;
1839 }
1840
1841 return peer;
1842}
1843EXPORT_SYMBOL(tcp_v4_get_peer);
1844
1845void *tcp_v4_tw_get_peer(struct sock *sk)
1846{
1847 const struct inet_timewait_sock *tw = inet_twsk(sk);
1848
1849 return inet_getpeer_v4(tw->tw_daddr, 1);
1850}
1851EXPORT_SYMBOL(tcp_v4_tw_get_peer);
1852
1853static struct timewait_sock_ops tcp_timewait_sock_ops = {
1854 .twsk_obj_size = sizeof(struct tcp_timewait_sock),
1855 .twsk_unique = tcp_twsk_unique,
1856 .twsk_destructor= tcp_twsk_destructor,
1857 .twsk_getpeer = tcp_v4_tw_get_peer,
1858};
1859
1860const struct inet_connection_sock_af_ops ipv4_specific = {
1861 .queue_xmit = ip_queue_xmit,
1862 .send_check = tcp_v4_send_check,
1863 .rebuild_header = inet_sk_rebuild_header,
1864 .conn_request = tcp_v4_conn_request,
1865 .syn_recv_sock = tcp_v4_syn_recv_sock,
1866 .get_peer = tcp_v4_get_peer,
1867 .net_header_len = sizeof(struct iphdr),
1868 .setsockopt = ip_setsockopt,
1869 .getsockopt = ip_getsockopt,
1870 .addr2sockaddr = inet_csk_addr2sockaddr,
1871 .sockaddr_len = sizeof(struct sockaddr_in),
1872 .bind_conflict = inet_csk_bind_conflict,
1873#ifdef CONFIG_COMPAT
1874 .compat_setsockopt = compat_ip_setsockopt,
1875 .compat_getsockopt = compat_ip_getsockopt,
1876#endif
1877};
1878EXPORT_SYMBOL(ipv4_specific);
1879
1880#ifdef CONFIG_TCP_MD5SIG
1881static const struct tcp_sock_af_ops tcp_sock_ipv4_specific = {
1882 .md5_lookup = tcp_v4_md5_lookup,
1883 .calc_md5_hash = tcp_v4_md5_hash_skb,
1884 .md5_parse = tcp_v4_parse_md5_keys,
1885};
1886#endif
1887
1888/* NOTE: A lot of things are set to zero explicitly by the call to
1889 * sk_alloc(), so they need not be done here.
1890 */
1891static int tcp_v4_init_sock(struct sock *sk)
1892{
1893 struct inet_connection_sock *icsk = inet_csk(sk);
1894
1895 tcp_init_sock(sk);
1896
1897 icsk->icsk_af_ops = &ipv4_specific;
1898
1899#ifdef CONFIG_TCP_MD5SIG
1900 tcp_sk(sk)->af_specific = &tcp_sock_ipv4_specific;
1901#endif
1902
1903 return 0;
1904}
1905
1906void tcp_v4_destroy_sock(struct sock *sk)
1907{
1908 struct tcp_sock *tp = tcp_sk(sk);
1909
1910 tcp_clear_xmit_timers(sk);
1911
1912 tcp_cleanup_congestion_control(sk);
1913
1914 /* Clean up the write buffer. */
1915 tcp_write_queue_purge(sk);
1916
1917 /* Cleans up our, hopefully empty, out_of_order_queue. */
1918 __skb_queue_purge(&tp->out_of_order_queue);
1919
1920#ifdef CONFIG_TCP_MD5SIG
1921 /* Clean up the MD5 key list, if any */
1922 if (tp->md5sig_info) {
1923 tcp_clear_md5_list(sk);
1924 kfree_rcu(tp->md5sig_info, rcu);
1925 tp->md5sig_info = NULL;
1926 }
1927#endif
1928
1929#ifdef CONFIG_NET_DMA
1930 /* Cleans up our sk_async_wait_queue */
1931 __skb_queue_purge(&sk->sk_async_wait_queue);
1932#endif
1933
1934 /* Clean up the prequeue; it really should be empty by now */
1935 __skb_queue_purge(&tp->ucopy.prequeue);
1936
1937 /* Clean up a referenced TCP bind bucket. */
1938 if (inet_csk(sk)->icsk_bind_hash)
1939 inet_put_port(sk);
1940
1941 /*
1942 * If sendmsg cached page exists, toss it.
1943 */
1944 if (sk->sk_sndmsg_page) {
1945 __free_page(sk->sk_sndmsg_page);
1946 sk->sk_sndmsg_page = NULL;
1947 }
1948
1949 /* TCP Cookie Transactions */
1950 if (tp->cookie_values != NULL) {
1951 kref_put(&tp->cookie_values->kref,
1952 tcp_cookie_values_release);
1953 tp->cookie_values = NULL;
1954 }
1955
1956 sk_sockets_allocated_dec(sk);
1957 sock_release_memcg(sk);
1958}
1959EXPORT_SYMBOL(tcp_v4_destroy_sock);
1960
1961#ifdef CONFIG_PROC_FS
1962/* Proc filesystem TCP sock list dumping. */
1963
1964static inline struct inet_timewait_sock *tw_head(struct hlist_nulls_head *head)
1965{
1966 return hlist_nulls_empty(head) ? NULL :
1967 list_entry(head->first, struct inet_timewait_sock, tw_node);
1968}
1969
1970static inline struct inet_timewait_sock *tw_next(struct inet_timewait_sock *tw)
1971{
1972 return !is_a_nulls(tw->tw_node.next) ?
1973 hlist_nulls_entry(tw->tw_node.next, typeof(*tw), tw_node) : NULL;
1974}
1975
1976/*
1977 * Get the next listening socket following cur. If cur is NULL, get the first
1978 * socket, starting from the bucket given in st->bucket; when st->bucket is
1979 * zero, the very first socket in the hash table is returned.
1980 */
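/* Iteration runs over the listening hash buckets; within a bucket it walks
 * the listening sockets, and for each socket that has pending open requests
 * it switches st->state to TCP_SEQ_STATE_OPENREQ and walks that socket's SYN
 * table under syn_wait_lock before moving on.
 */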
1981static void *listening_get_next(struct seq_file *seq, void *cur)
1982{
1983 struct inet_connection_sock *icsk;
1984 struct hlist_nulls_node *node;
1985 struct sock *sk = cur;
1986 struct inet_listen_hashbucket *ilb;
1987 struct tcp_iter_state *st = seq->private;
1988 struct net *net = seq_file_net(seq);
1989
1990 if (!sk) {
1991 ilb = &tcp_hashinfo.listening_hash[st->bucket];
1992 spin_lock_bh(&ilb->lock);
1993 sk = sk_nulls_head(&ilb->head);
1994 st->offset = 0;
1995 goto get_sk;
1996 }
1997 ilb = &tcp_hashinfo.listening_hash[st->bucket];
1998 ++st->num;
1999 ++st->offset;
2000
2001 if (st->state == TCP_SEQ_STATE_OPENREQ) {
2002 struct request_sock *req = cur;
2003
2004 icsk = inet_csk(st->syn_wait_sk);
2005 req = req->dl_next;
2006 while (1) {
2007 while (req) {
2008 if (req->rsk_ops->family == st->family) {
2009 cur = req;
2010 goto out;
2011 }
2012 req = req->dl_next;
2013 }
2014 if (++st->sbucket >= icsk->icsk_accept_queue.listen_opt->nr_table_entries)
2015 break;
2016get_req:
2017 req = icsk->icsk_accept_queue.listen_opt->syn_table[st->sbucket];
2018 }
2019 sk = sk_nulls_next(st->syn_wait_sk);
2020 st->state = TCP_SEQ_STATE_LISTENING;
2021 read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
2022 } else {
2023 icsk = inet_csk(sk);
2024 read_lock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
2025 if (reqsk_queue_len(&icsk->icsk_accept_queue))
2026 goto start_req;
2027 read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
2028 sk = sk_nulls_next(sk);
2029 }
2030get_sk:
2031 sk_nulls_for_each_from(sk, node) {
2032 if (!net_eq(sock_net(sk), net))
2033 continue;
2034 if (sk->sk_family == st->family) {
2035 cur = sk;
2036 goto out;
2037 }
2038 icsk = inet_csk(sk);
2039 read_lock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
2040 if (reqsk_queue_len(&icsk->icsk_accept_queue)) {
2041start_req:
2042 st->uid = sock_i_uid(sk);
2043 st->syn_wait_sk = sk;
2044 st->state = TCP_SEQ_STATE_OPENREQ;
2045 st->sbucket = 0;
2046 goto get_req;
2047 }
2048 read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
2049 }
2050 spin_unlock_bh(&ilb->lock);
2051 st->offset = 0;
2052 if (++st->bucket < INET_LHTABLE_SIZE) {
2053 ilb = &tcp_hashinfo.listening_hash[st->bucket];
2054 spin_lock_bh(&ilb->lock);
2055 sk = sk_nulls_head(&ilb->head);
2056 goto get_sk;
2057 }
2058 cur = NULL;
2059out:
2060 return cur;
2061}
2062
2063static void *listening_get_idx(struct seq_file *seq, loff_t *pos)
2064{
2065 struct tcp_iter_state *st = seq->private;
2066 void *rc;
2067
2068 st->bucket = 0;
2069 st->offset = 0;
2070 rc = listening_get_next(seq, NULL);
2071
2072 while (rc && *pos) {
2073 rc = listening_get_next(seq, rc);
2074 --*pos;
2075 }
2076 return rc;
2077}
2078
2079static inline bool empty_bucket(struct tcp_iter_state *st)
2080{
2081 return hlist_nulls_empty(&tcp_hashinfo.ehash[st->bucket].chain) &&
2082 hlist_nulls_empty(&tcp_hashinfo.ehash[st->bucket].twchain);
2083}
2084
2085/*
2086 * Get first established socket starting from bucket given in st->bucket.
2087 * If st->bucket is zero, the very first socket in the hash is returned.
2088 */
2089static void *established_get_first(struct seq_file *seq)
2090{
2091 struct tcp_iter_state *st = seq->private;
2092 struct net *net = seq_file_net(seq);
2093 void *rc = NULL;
2094
2095 st->offset = 0;
2096 for (; st->bucket <= tcp_hashinfo.ehash_mask; ++st->bucket) {
2097 struct sock *sk;
2098 struct hlist_nulls_node *node;
2099 struct inet_timewait_sock *tw;
2100 spinlock_t *lock = inet_ehash_lockp(&tcp_hashinfo, st->bucket);
2101
2102 /* Lockless fast path for the common case of empty buckets */
2103 if (empty_bucket(st))
2104 continue;
2105
2106 spin_lock_bh(lock);
2107 sk_nulls_for_each(sk, node, &tcp_hashinfo.ehash[st->bucket].chain) {
2108 if (sk->sk_family != st->family ||
2109 !net_eq(sock_net(sk), net)) {
2110 continue;
2111 }
2112 rc = sk;
2113 goto out;
2114 }
2115 st->state = TCP_SEQ_STATE_TIME_WAIT;
2116 inet_twsk_for_each(tw, node,
2117 &tcp_hashinfo.ehash[st->bucket].twchain) {
2118 if (tw->tw_family != st->family ||
2119 !net_eq(twsk_net(tw), net)) {
2120 continue;
2121 }
2122 rc = tw;
2123 goto out;
2124 }
2125 spin_unlock_bh(lock);
2126 st->state = TCP_SEQ_STATE_ESTABLISHED;
2127 }
2128out:
2129 return rc;
2130}
2131
2132static void *established_get_next(struct seq_file *seq, void *cur)
2133{
2134 struct sock *sk = cur;
2135 struct inet_timewait_sock *tw;
2136 struct hlist_nulls_node *node;
2137 struct tcp_iter_state *st = seq->private;
2138 struct net *net = seq_file_net(seq);
2139
2140 ++st->num;
2141 ++st->offset;
2142
2143 if (st->state == TCP_SEQ_STATE_TIME_WAIT) {
2144 tw = cur;
2145 tw = tw_next(tw);
2146get_tw:
2147 while (tw && (tw->tw_family != st->family || !net_eq(twsk_net(tw), net))) {
2148 tw = tw_next(tw);
2149 }
2150 if (tw) {
2151 cur = tw;
2152 goto out;
2153 }
2154 spin_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
2155 st->state = TCP_SEQ_STATE_ESTABLISHED;
2156
2157 /* Look for the next non-empty bucket */
2158 st->offset = 0;
2159 while (++st->bucket <= tcp_hashinfo.ehash_mask &&
2160 empty_bucket(st))
2161 ;
2162 if (st->bucket > tcp_hashinfo.ehash_mask)
2163 return NULL;
2164
2165 spin_lock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
2166 sk = sk_nulls_head(&tcp_hashinfo.ehash[st->bucket].chain);
2167 } else
2168 sk = sk_nulls_next(sk);
2169
2170 sk_nulls_for_each_from(sk, node) {
2171 if (sk->sk_family == st->family && net_eq(sock_net(sk), net))
2172 goto found;
2173 }
2174
2175 st->state = TCP_SEQ_STATE_TIME_WAIT;
2176 tw = tw_head(&tcp_hashinfo.ehash[st->bucket].twchain);
2177 goto get_tw;
2178found:
2179 cur = sk;
2180out:
2181 return cur;
2182}
2183
2184static void *established_get_idx(struct seq_file *seq, loff_t pos)
2185{
2186 struct tcp_iter_state *st = seq->private;
2187 void *rc;
2188
2189 st->bucket = 0;
2190 rc = established_get_first(seq);
2191
2192 while (rc && pos) {
2193 rc = established_get_next(seq, rc);
2194 --pos;
2195 }
2196 return rc;
2197}
2198
2199static void *tcp_get_idx(struct seq_file *seq, loff_t pos)
2200{
2201 void *rc;
2202 struct tcp_iter_state *st = seq->private;
2203
2204 st->state = TCP_SEQ_STATE_LISTENING;
2205 rc = listening_get_idx(seq, &pos);
2206
2207 if (!rc) {
2208 st->state = TCP_SEQ_STATE_ESTABLISHED;
2209 rc = established_get_idx(seq, pos);
2210 }
2211
2212 return rc;
2213}
2214
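/* seq_file readers can stop and restart at an arbitrary offset. Instead of
 * rescanning from the beginning, remember the bucket and the offset within it
 * (st->bucket / st->offset) and try to resume from there; st->num is restored
 * afterwards so the printed entry numbering stays consistent.
 */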
2215static void *tcp_seek_last_pos(struct seq_file *seq)
2216{
2217 struct tcp_iter_state *st = seq->private;
2218 int offset = st->offset;
2219 int orig_num = st->num;
2220 void *rc = NULL;
2221
2222 switch (st->state) {
2223 case TCP_SEQ_STATE_OPENREQ:
2224 case TCP_SEQ_STATE_LISTENING:
2225 if (st->bucket >= INET_LHTABLE_SIZE)
2226 break;
2227 st->state = TCP_SEQ_STATE_LISTENING;
2228 rc = listening_get_next(seq, NULL);
2229 while (offset-- && rc)
2230 rc = listening_get_next(seq, rc);
2231 if (rc)
2232 break;
2233 st->bucket = 0;
2234 /* Fallthrough */
2235 case TCP_SEQ_STATE_ESTABLISHED:
2236 case TCP_SEQ_STATE_TIME_WAIT:
2237 st->state = TCP_SEQ_STATE_ESTABLISHED;
2238 if (st->bucket > tcp_hashinfo.ehash_mask)
2239 break;
2240 rc = established_get_first(seq);
2241 while (offset-- && rc)
2242 rc = established_get_next(seq, rc);
2243 }
2244
2245 st->num = orig_num;
2246
2247 return rc;
2248}
2249
2250static void *tcp_seq_start(struct seq_file *seq, loff_t *pos)
2251{
2252 struct tcp_iter_state *st = seq->private;
2253 void *rc;
2254
2255 if (*pos && *pos == st->last_pos) {
2256 rc = tcp_seek_last_pos(seq);
2257 if (rc)
2258 goto out;
2259 }
2260
2261 st->state = TCP_SEQ_STATE_LISTENING;
2262 st->num = 0;
2263 st->bucket = 0;
2264 st->offset = 0;
2265 rc = *pos ? tcp_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
2266
2267out:
2268 st->last_pos = *pos;
2269 return rc;
2270}
2271
2272static void *tcp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2273{
2274 struct tcp_iter_state *st = seq->private;
2275 void *rc = NULL;
2276
2277 if (v == SEQ_START_TOKEN) {
2278 rc = tcp_get_idx(seq, 0);
2279 goto out;
2280 }
2281
2282 switch (st->state) {
2283 case TCP_SEQ_STATE_OPENREQ:
2284 case TCP_SEQ_STATE_LISTENING:
2285 rc = listening_get_next(seq, v);
2286 if (!rc) {
2287 st->state = TCP_SEQ_STATE_ESTABLISHED;
2288 st->bucket = 0;
2289 st->offset = 0;
2290 rc = established_get_first(seq);
2291 }
2292 break;
2293 case TCP_SEQ_STATE_ESTABLISHED:
2294 case TCP_SEQ_STATE_TIME_WAIT:
2295 rc = established_get_next(seq, v);
2296 break;
2297 }
2298out:
2299 ++*pos;
2300 st->last_pos = *pos;
2301 return rc;
2302}
2303
2304static void tcp_seq_stop(struct seq_file *seq, void *v)
2305{
2306 struct tcp_iter_state *st = seq->private;
2307
2308 switch (st->state) {
2309 case TCP_SEQ_STATE_OPENREQ:
2310 if (v) {
2311 struct inet_connection_sock *icsk = inet_csk(st->syn_wait_sk);
2312 read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
2313 }
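/* fall through: also release the listening bucket lock */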
2314 case TCP_SEQ_STATE_LISTENING:
2315 if (v != SEQ_START_TOKEN)
2316 spin_unlock_bh(&tcp_hashinfo.listening_hash[st->bucket].lock);
2317 break;
2318 case TCP_SEQ_STATE_TIME_WAIT:
2319 case TCP_SEQ_STATE_ESTABLISHED:
2320 if (v)
2321 spin_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
2322 break;
2323 }
2324}
2325
2326int tcp_seq_open(struct inode *inode, struct file *file)
2327{
2328 struct tcp_seq_afinfo *afinfo = PDE(inode)->data;
2329 struct tcp_iter_state *s;
2330 int err;
2331
2332 err = seq_open_net(inode, file, &afinfo->seq_ops,
2333 sizeof(struct tcp_iter_state));
2334 if (err < 0)
2335 return err;
2336
2337 s = ((struct seq_file *)file->private_data)->private;
2338 s->family = afinfo->family;
2339 s->last_pos = 0;
2340 return 0;
2341}
2342EXPORT_SYMBOL(tcp_seq_open);
2343
2344int tcp_proc_register(struct net *net, struct tcp_seq_afinfo *afinfo)
2345{
2346 int rc = 0;
2347 struct proc_dir_entry *p;
2348
2349 afinfo->seq_ops.start = tcp_seq_start;
2350 afinfo->seq_ops.next = tcp_seq_next;
2351 afinfo->seq_ops.stop = tcp_seq_stop;
2352
2353 p = proc_create_data(afinfo->name, S_IRUGO, net->proc_net,
2354 afinfo->seq_fops, afinfo);
2355 if (!p)
2356 rc = -ENOMEM;
2357 return rc;
2358}
2359EXPORT_SYMBOL(tcp_proc_register);
2360
2361void tcp_proc_unregister(struct net *net, struct tcp_seq_afinfo *afinfo)
2362{
2363 proc_net_remove(net, afinfo->name);
2364}
2365EXPORT_SYMBOL(tcp_proc_unregister);
2366
2367static void get_openreq4(const struct sock *sk, const struct request_sock *req,
2368 struct seq_file *f, int i, int uid, int *len)
2369{
2370 const struct inet_request_sock *ireq = inet_rsk(req);
2371 int ttd = req->expires - jiffies;
2372
2373 seq_printf(f, "%4d: %08X:%04X %08X:%04X"
2374 " %02X %08X:%08X %02X:%08lX %08X %5d %8d %u %d %pK%n",
2375 i,
2376 ireq->loc_addr,
2377 ntohs(inet_sk(sk)->inet_sport),
2378 ireq->rmt_addr,
2379 ntohs(ireq->rmt_port),
2380 TCP_SYN_RECV,
2381 0, 0, /* could print option size, but that is af dependent. */
2382 1, /* timers active (only the expire timer) */
2383 jiffies_to_clock_t(ttd),
2384 req->retrans,
2385 uid,
2386 0, /* non standard timer */
2387 0, /* open_requests have no inode */
2388 atomic_read(&sk->sk_refcnt),
2389 req,
2390 len);
2391}
2392
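/* Format one socket as a row of /proc/net/tcp. The "tr" timer code printed
 * below is 1 for a pending retransmit timer, 4 for a zero-window probe timer,
 * 2 when sk_timer (e.g. keepalive) is pending, and 0 when no timer is armed.
 */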
2393static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i, int *len)
2394{
2395 int timer_active;
2396 unsigned long timer_expires;
2397 const struct tcp_sock *tp = tcp_sk(sk);
2398 const struct inet_connection_sock *icsk = inet_csk(sk);
2399 const struct inet_sock *inet = inet_sk(sk);
2400 __be32 dest = inet->inet_daddr;
2401 __be32 src = inet->inet_rcv_saddr;
2402 __u16 destp = ntohs(inet->inet_dport);
2403 __u16 srcp = ntohs(inet->inet_sport);
2404 int rx_queue;
2405
2406 if (icsk->icsk_pending == ICSK_TIME_RETRANS) {
2407 timer_active = 1;
2408 timer_expires = icsk->icsk_timeout;
2409 } else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
2410 timer_active = 4;
2411 timer_expires = icsk->icsk_timeout;
2412 } else if (timer_pending(&sk->sk_timer)) {
2413 timer_active = 2;
2414 timer_expires = sk->sk_timer.expires;
2415 } else {
2416 timer_active = 0;
2417 timer_expires = jiffies;
2418 }
2419
2420 if (sk->sk_state == TCP_LISTEN)
2421 rx_queue = sk->sk_ack_backlog;
2422 else
2423 /*
2424 * because we don't lock the socket, we might find a transient negative value
2425 */
2426 rx_queue = max_t(int, tp->rcv_nxt - tp->copied_seq, 0);
2427
2428 seq_printf(f, "%4d: %08X:%04X %08X:%04X %02X %08X:%08X %02X:%08lX "
2429 "%08X %5d %8d %lu %d %pK %lu %lu %u %u %d%n",
2430 i, src, srcp, dest, destp, sk->sk_state,
2431 tp->write_seq - tp->snd_una,
2432 rx_queue,
2433 timer_active,
2434 jiffies_to_clock_t(timer_expires - jiffies),
2435 icsk->icsk_retransmits,
2436 sock_i_uid(sk),
2437 icsk->icsk_probes_out,
2438 sock_i_ino(sk),
2439 atomic_read(&sk->sk_refcnt), sk,
2440 jiffies_to_clock_t(icsk->icsk_rto),
2441 jiffies_to_clock_t(icsk->icsk_ack.ato),
2442 (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
2443 tp->snd_cwnd,
2444 tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh,
2445 len);
2446}
2447
2448static void get_timewait4_sock(const struct inet_timewait_sock *tw,
2449 struct seq_file *f, int i, int *len)
2450{
2451 __be32 dest, src;
2452 __u16 destp, srcp;
2453 int ttd = tw->tw_ttd - jiffies;
2454
2455 if (ttd < 0)
2456 ttd = 0;
2457
2458 dest = tw->tw_daddr;
2459 src = tw->tw_rcv_saddr;
2460 destp = ntohs(tw->tw_dport);
2461 srcp = ntohs(tw->tw_sport);
2462
2463 seq_printf(f, "%4d: %08X:%04X %08X:%04X"
2464 " %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK%n",
2465 i, src, srcp, dest, destp, tw->tw_substate, 0, 0,
2466 3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
2467 atomic_read(&tw->tw_refcnt), tw, len);
2468}
2469
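/* Every row of /proc/net/tcp is padded with spaces to a fixed width of
 * TMPSZ - 1 characters, so user space can treat the file as fixed-length
 * records.
 */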
2470#define TMPSZ 150
2471
2472static int tcp4_seq_show(struct seq_file *seq, void *v)
2473{
2474 struct tcp_iter_state *st;
2475 int len;
2476
2477 if (v == SEQ_START_TOKEN) {
2478 seq_printf(seq, "%-*s\n", TMPSZ - 1,
2479 " sl local_address rem_address st tx_queue "
2480 "rx_queue tr tm->when retrnsmt uid timeout "
2481 "inode");
2482 goto out;
2483 }
2484 st = seq->private;
2485
2486 switch (st->state) {
2487 case TCP_SEQ_STATE_LISTENING:
2488 case TCP_SEQ_STATE_ESTABLISHED:
2489 get_tcp4_sock(v, seq, st->num, &len);
2490 break;
2491 case TCP_SEQ_STATE_OPENREQ:
2492 get_openreq4(st->syn_wait_sk, v, seq, st->num, st->uid, &len);
2493 break;
2494 case TCP_SEQ_STATE_TIME_WAIT:
2495 get_timewait4_sock(v, seq, st->num, &len);
2496 break;
2497 }
2498 seq_printf(seq, "%*s\n", TMPSZ - 1 - len, "");
2499out:
2500 return 0;
2501}
2502
2503static const struct file_operations tcp_afinfo_seq_fops = {
2504 .owner = THIS_MODULE,
2505 .open = tcp_seq_open,
2506 .read = seq_read,
2507 .llseek = seq_lseek,
2508 .release = seq_release_net
2509};
2510
2511static struct tcp_seq_afinfo tcp4_seq_afinfo = {
2512 .name = "tcp",
2513 .family = AF_INET,
2514 .seq_fops = &tcp_afinfo_seq_fops,
2515 .seq_ops = {
2516 .show = tcp4_seq_show,
2517 },
2518};
2519
2520static int __net_init tcp4_proc_init_net(struct net *net)
2521{
2522 return tcp_proc_register(net, &tcp4_seq_afinfo);
2523}
2524
2525static void __net_exit tcp4_proc_exit_net(struct net *net)
2526{
2527 tcp_proc_unregister(net, &tcp4_seq_afinfo);
2528}
2529
2530static struct pernet_operations tcp4_net_ops = {
2531 .init = tcp4_proc_init_net,
2532 .exit = tcp4_proc_exit_net,
2533};
2534
2535int __init tcp4_proc_init(void)
2536{
2537 return register_pernet_subsys(&tcp4_net_ops);
2538}
2539
2540void tcp4_proc_exit(void)
2541{
2542 unregister_pernet_subsys(&tcp4_net_ops);
2543}
2544#endif /* CONFIG_PROC_FS */
2545
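/* GRO hooks for IPv4 TCP: tcp4_gro_receive verifies the checksum (using a
 * hardware CHECKSUM_COMPLETE value when available, otherwise flushing the
 * segment out of aggregation) and hands it to the generic tcp_gro_receive();
 * tcp4_gro_complete recomputes the pseudo-header checksum for the merged
 * packet and marks it as SKB_GSO_TCPV4.
 */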
2546struct sk_buff **tcp4_gro_receive(struct sk_buff **head, struct sk_buff *skb)
2547{
2548 const struct iphdr *iph = skb_gro_network_header(skb);
2549
2550 switch (skb->ip_summed) {
2551 case CHECKSUM_COMPLETE:
2552 if (!tcp_v4_check(skb_gro_len(skb), iph->saddr, iph->daddr,
2553 skb->csum)) {
2554 skb->ip_summed = CHECKSUM_UNNECESSARY;
2555 break;
2556 }
2557
2558 /* fall through */
2559 case CHECKSUM_NONE:
2560 NAPI_GRO_CB(skb)->flush = 1;
2561 return NULL;
2562 }
2563
2564 return tcp_gro_receive(head, skb);
2565}
2566
2567int tcp4_gro_complete(struct sk_buff *skb)
2568{
2569 const struct iphdr *iph = ip_hdr(skb);
2570 struct tcphdr *th = tcp_hdr(skb);
2571
2572 th->check = ~tcp_v4_check(skb->len - skb_transport_offset(skb),
2573 iph->saddr, iph->daddr, 0);
2574 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
2575
2576 return tcp_gro_complete(skb);
2577}
2578
2579struct proto tcp_prot = {
2580 .name = "TCP",
2581 .owner = THIS_MODULE,
2582 .close = tcp_close,
2583 .connect = tcp_v4_connect,
2584 .disconnect = tcp_disconnect,
2585 .accept = inet_csk_accept,
2586 .ioctl = tcp_ioctl,
2587 .init = tcp_v4_init_sock,
2588 .destroy = tcp_v4_destroy_sock,
2589 .shutdown = tcp_shutdown,
2590 .setsockopt = tcp_setsockopt,
2591 .getsockopt = tcp_getsockopt,
2592 .recvmsg = tcp_recvmsg,
2593 .sendmsg = tcp_sendmsg,
2594 .sendpage = tcp_sendpage,
2595 .backlog_rcv = tcp_v4_do_rcv,
2596 .hash = inet_hash,
2597 .unhash = inet_unhash,
2598 .get_port = inet_csk_get_port,
2599 .enter_memory_pressure = tcp_enter_memory_pressure,
2600 .sockets_allocated = &tcp_sockets_allocated,
2601 .orphan_count = &tcp_orphan_count,
2602 .memory_allocated = &tcp_memory_allocated,
2603 .memory_pressure = &tcp_memory_pressure,
2604 .sysctl_wmem = sysctl_tcp_wmem,
2605 .sysctl_rmem = sysctl_tcp_rmem,
2606 .max_header = MAX_TCP_HEADER,
2607 .obj_size = sizeof(struct tcp_sock),
2608 .slab_flags = SLAB_DESTROY_BY_RCU,
2609 .twsk_prot = &tcp_timewait_sock_ops,
2610 .rsk_prot = &tcp_request_sock_ops,
2611 .h.hashinfo = &tcp_hashinfo,
2612 .no_autobind = true,
2613#ifdef CONFIG_COMPAT
2614 .compat_setsockopt = compat_tcp_setsockopt,
2615 .compat_getsockopt = compat_tcp_getsockopt,
2616#endif
2617#ifdef CONFIG_CGROUP_MEM_RES_CTLR_KMEM
2618 .init_cgroup = tcp_init_cgroup,
2619 .destroy_cgroup = tcp_destroy_cgroup,
2620 .proto_cgroup = tcp_proto_cgroup,
2621#endif
2622};
2623EXPORT_SYMBOL(tcp_prot);
2624
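/* Per-namespace setup: net->ipv4.tcp_sock is a kernel control socket used to
 * transmit stack-generated segments (e.g. RSTs and TIME-WAIT ACKs) that are
 * not associated with any user socket.
 */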
2625static int __net_init tcp_sk_init(struct net *net)
2626{
2627 return inet_ctl_sock_create(&net->ipv4.tcp_sock,
2628 PF_INET, SOCK_RAW, IPPROTO_TCP, net);
2629}
2630
2631static void __net_exit tcp_sk_exit(struct net *net)
2632{
2633 inet_ctl_sock_destroy(net->ipv4.tcp_sock);
2634}
2635
2636static void __net_exit tcp_sk_exit_batch(struct list_head *net_exit_list)
2637{
2638 inet_twsk_purge(&tcp_hashinfo, &tcp_death_row, AF_INET);
2639}
2640
2641static struct pernet_operations __net_initdata tcp_sk_ops = {
2642 .init = tcp_sk_init,
2643 .exit = tcp_sk_exit,
2644 .exit_batch = tcp_sk_exit_batch,
2645};
2646
2647void __init tcp_v4_init(void)
2648{
2649 inet_hashinfo_init(&tcp_hashinfo);
2650 if (register_pernet_subsys(&tcp_sk_ops))
2651 panic("Failed to create the TCP control socket.\n");
2652}