1// SPDX-License-Identifier: GPL-2.0-or-later
2/*
3 * INET An implementation of the TCP/IP protocol suite for the LINUX
4 * operating system. INET is implemented using the BSD Socket
5 * interface as the means of communication with the user level.
6 *
7 * Implementation of the Transmission Control Protocol(TCP).
8 *
9 * IPv4 specific functions
10 *
11 * code split from:
12 * linux/ipv4/tcp.c
13 * linux/ipv4/tcp_input.c
14 * linux/ipv4/tcp_output.c
15 *
16 * See tcp.c for author information
17 */
18
19/*
20 * Changes:
21 * David S. Miller : New socket lookup architecture.
22 * This code is dedicated to John Dyson.
23 * David S. Miller : Change semantics of established hash,
24 * half is devoted to TIME_WAIT sockets
25 * and the rest go in the other half.
26 * Andi Kleen : Add support for syncookies and fixed
27 * some bugs: ip options weren't passed to
28 * the TCP layer, missed a check for an
29 * ACK bit.
30 * Andi Kleen : Implemented fast path mtu discovery.
31 * Fixed many serious bugs in the
32 * request_sock handling and moved
33 * most of it into the af independent code.
34 * Added tail drop and some other bugfixes.
35 * Added new listen semantics.
36 * Mike McLagan : Routing by source
37 * Juan Jose Ciarlante: ip_dynaddr bits
38 * Andi Kleen: various fixes.
39 * Vitaly E. Lavrov : Transparent proxy revived after year
40 * coma.
41 * Andi Kleen : Fix new listen.
42 * Andi Kleen : Fix accept error reporting.
43 * YOSHIFUJI Hideaki @USAGI and: Support IPV6_V6ONLY socket option, which
44 * Alexey Kuznetsov allow both IPv4 and IPv6 sockets to bind
45 * a single port at the same time.
46 */
47
48#define pr_fmt(fmt) "TCP: " fmt
49
50#include <linux/bottom_half.h>
51#include <linux/types.h>
52#include <linux/fcntl.h>
53#include <linux/module.h>
54#include <linux/random.h>
55#include <linux/cache.h>
56#include <linux/jhash.h>
57#include <linux/init.h>
58#include <linux/times.h>
59#include <linux/slab.h>
60#include <linux/sched.h>
61
62#include <net/net_namespace.h>
63#include <net/icmp.h>
64#include <net/inet_hashtables.h>
65#include <net/tcp.h>
66#include <net/transp_v6.h>
67#include <net/ipv6.h>
68#include <net/inet_common.h>
69#include <net/timewait_sock.h>
70#include <net/xfrm.h>
71#include <net/secure_seq.h>
72#include <net/busy_poll.h>
73
74#include <linux/inet.h>
75#include <linux/ipv6.h>
76#include <linux/stddef.h>
77#include <linux/proc_fs.h>
78#include <linux/seq_file.h>
79#include <linux/inetdevice.h>
80#include <linux/btf_ids.h>
81
82#include <crypto/hash.h>
83#include <linux/scatterlist.h>
84
85#include <trace/events/tcp.h>
86
87#ifdef CONFIG_TCP_MD5SIG
88static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
89 __be32 daddr, __be32 saddr, const struct tcphdr *th);
90#endif
91
92struct inet_hashinfo tcp_hashinfo;
93EXPORT_SYMBOL(tcp_hashinfo);
94
95static DEFINE_PER_CPU(struct sock *, ipv4_tcp_sk);
96
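/* Derive the initial sequence number for the reply direction from the
 * addresses and ports in the received skb. Note that daddr/saddr and
 * dest/source are passed swapped, because the ISN is generated for the
 * opposite direction of the incoming segment.
 */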
97static u32 tcp_v4_init_seq(const struct sk_buff *skb)
98{
99 return secure_tcp_seq(ip_hdr(skb)->daddr,
100 ip_hdr(skb)->saddr,
101 tcp_hdr(skb)->dest,
102 tcp_hdr(skb)->source);
103}
104
105static u32 tcp_v4_init_ts_off(const struct net *net, const struct sk_buff *skb)
106{
107 return secure_tcp_ts_off(net, ip_hdr(skb)->daddr, ip_hdr(skb)->saddr);
108}
109
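/* Decide whether the four-tuple currently held by the TIME-WAIT socket
 * @sktw may be reused for a new outgoing connection from @sk.
 * Returns 1 if reuse is allowed, 0 otherwise.
 */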
110int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp)
111{
112 int reuse = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_tw_reuse);
113 const struct inet_timewait_sock *tw = inet_twsk(sktw);
114 const struct tcp_timewait_sock *tcptw = tcp_twsk(sktw);
115 struct tcp_sock *tp = tcp_sk(sk);
116
117 if (reuse == 2) {
118 /* Still does not detect *everything* that goes through
119 * lo, since we require a loopback src or dst address
120 * or direct binding to 'lo' interface.
121 */
122 bool loopback = false;
123 if (tw->tw_bound_dev_if == LOOPBACK_IFINDEX)
124 loopback = true;
125#if IS_ENABLED(CONFIG_IPV6)
126 if (tw->tw_family == AF_INET6) {
127 if (ipv6_addr_loopback(&tw->tw_v6_daddr) ||
128 ipv6_addr_v4mapped_loopback(&tw->tw_v6_daddr) ||
129 ipv6_addr_loopback(&tw->tw_v6_rcv_saddr) ||
130 ipv6_addr_v4mapped_loopback(&tw->tw_v6_rcv_saddr))
131 loopback = true;
132 } else
133#endif
134 {
135 if (ipv4_is_loopback(tw->tw_daddr) ||
136 ipv4_is_loopback(tw->tw_rcv_saddr))
137 loopback = true;
138 }
139 if (!loopback)
140 reuse = 0;
141 }
142
143 /* With PAWS, it is safe from the viewpoint
144 of data integrity. Even without PAWS it is safe provided sequence
145 spaces do not overlap, i.e. at data rates <= 80 Mbit/sec.
146
147 Actually, the idea is close to VJ's one, only the timestamp cache is
148 held not per host but per port pair, and the TW bucket is used as the
149 state holder.
150
151 If the TW bucket has already been destroyed we fall back to VJ's scheme
152 and use the initial timestamp retrieved from the peer table.
153 */
154 if (tcptw->tw_ts_recent_stamp &&
155 (!twp || (reuse && time_after32(ktime_get_seconds(),
156 tcptw->tw_ts_recent_stamp)))) {
157 /* inet_twsk_hashdance() sets sk_refcnt after putting twsk
158 * and releasing the bucket lock.
159 */
160 if (unlikely(!refcount_inc_not_zero(&sktw->sk_refcnt)))
161 return 0;
162
163 /* In case of repair and re-using TIME-WAIT sockets we still
164 * want to be sure that it is safe as above but honor the
165 * sequence numbers and time stamps set as part of the repair
166 * process.
167 *
168 * Without this check re-using a TIME-WAIT socket with TCP
169 * repair would accumulate a -1 on the repair assigned
170 * sequence number. The first time it is reused the sequence
171 * is -1, the second time -2, etc. This fixes that issue
172 * without appearing to create any others.
173 */
174 if (likely(!tp->repair)) {
175 u32 seq = tcptw->tw_snd_nxt + 65535 + 2;
176
177 if (!seq)
178 seq = 1;
179 WRITE_ONCE(tp->write_seq, seq);
180 tp->rx_opt.ts_recent = tcptw->tw_ts_recent;
181 tp->rx_opt.ts_recent_stamp = tcptw->tw_ts_recent_stamp;
182 }
183
184 return 1;
185 }
186
187 return 0;
188}
189EXPORT_SYMBOL_GPL(tcp_twsk_unique);
190
191static int tcp_v4_pre_connect(struct sock *sk, struct sockaddr *uaddr,
192 int addr_len)
193{
194 /* This check is replicated from tcp_v4_connect() and intended to
195 * prevent the BPF program called below from accessing bytes that are
196 * out of the bound specified by the user in addr_len.
197 */
198 if (addr_len < sizeof(struct sockaddr_in))
199 return -EINVAL;
200
201 sock_owned_by_me(sk);
202
203 return BPF_CGROUP_RUN_PROG_INET4_CONNECT(sk, uaddr, &addr_len);
204}
205
206/* This will initiate an outgoing connection. */
207int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
208{
209 struct sockaddr_in *usin = (struct sockaddr_in *)uaddr;
210 struct inet_timewait_death_row *tcp_death_row;
211 struct inet_sock *inet = inet_sk(sk);
212 struct tcp_sock *tp = tcp_sk(sk);
213 struct ip_options_rcu *inet_opt;
214 struct net *net = sock_net(sk);
215 __be16 orig_sport, orig_dport;
216 __be32 daddr, nexthop;
217 struct flowi4 *fl4;
218 struct rtable *rt;
219 int err;
220
221 if (addr_len < sizeof(struct sockaddr_in))
222 return -EINVAL;
223
224 if (usin->sin_family != AF_INET)
225 return -EAFNOSUPPORT;
226
227 nexthop = daddr = usin->sin_addr.s_addr;
228 inet_opt = rcu_dereference_protected(inet->inet_opt,
229 lockdep_sock_is_held(sk));
230 if (inet_opt && inet_opt->opt.srr) {
231 if (!daddr)
232 return -EINVAL;
233 nexthop = inet_opt->opt.faddr;
234 }
235
236 orig_sport = inet->inet_sport;
237 orig_dport = usin->sin_port;
238 fl4 = &inet->cork.fl.u.ip4;
239 rt = ip_route_connect(fl4, nexthop, inet->inet_saddr,
240 sk->sk_bound_dev_if, IPPROTO_TCP, orig_sport,
241 orig_dport, sk);
242 if (IS_ERR(rt)) {
243 err = PTR_ERR(rt);
244 if (err == -ENETUNREACH)
245 IP_INC_STATS(net, IPSTATS_MIB_OUTNOROUTES);
246 return err;
247 }
248
249 if (rt->rt_flags & (RTCF_MULTICAST | RTCF_BROADCAST)) {
250 ip_rt_put(rt);
251 return -ENETUNREACH;
252 }
253
254 if (!inet_opt || !inet_opt->opt.srr)
255 daddr = fl4->daddr;
256
257 tcp_death_row = &sock_net(sk)->ipv4.tcp_death_row;
258
259 if (!inet->inet_saddr) {
260 err = inet_bhash2_update_saddr(sk, &fl4->saddr, AF_INET);
261 if (err) {
262 ip_rt_put(rt);
263 return err;
264 }
265 } else {
266 sk_rcv_saddr_set(sk, inet->inet_saddr);
267 }
268
269 if (tp->rx_opt.ts_recent_stamp && inet->inet_daddr != daddr) {
270 /* Reset inherited state */
271 tp->rx_opt.ts_recent = 0;
272 tp->rx_opt.ts_recent_stamp = 0;
273 if (likely(!tp->repair))
274 WRITE_ONCE(tp->write_seq, 0);
275 }
276
277 inet->inet_dport = usin->sin_port;
278 sk_daddr_set(sk, daddr);
279
280 inet_csk(sk)->icsk_ext_hdr_len = 0;
281 if (inet_opt)
282 inet_csk(sk)->icsk_ext_hdr_len = inet_opt->opt.optlen;
283
284 tp->rx_opt.mss_clamp = TCP_MSS_DEFAULT;
285
286 /* Socket identity is still unknown (sport may be zero).
287 * However, we set the state to SYN-SENT and, without releasing the socket
288 * lock, select a source port, enter ourselves into the hash tables and
289 * complete initialization after this.
290 */
291 tcp_set_state(sk, TCP_SYN_SENT);
292 err = inet_hash_connect(tcp_death_row, sk);
293 if (err)
294 goto failure;
295
296 sk_set_txhash(sk);
297
298 rt = ip_route_newports(fl4, rt, orig_sport, orig_dport,
299 inet->inet_sport, inet->inet_dport, sk);
300 if (IS_ERR(rt)) {
301 err = PTR_ERR(rt);
302 rt = NULL;
303 goto failure;
304 }
305 tp->tcp_usec_ts = dst_tcp_usec_ts(&rt->dst);
306 /* OK, now commit destination to socket. */
307 sk->sk_gso_type = SKB_GSO_TCPV4;
308 sk_setup_caps(sk, &rt->dst);
309 rt = NULL;
310
311 if (likely(!tp->repair)) {
312 if (!tp->write_seq)
313 WRITE_ONCE(tp->write_seq,
314 secure_tcp_seq(inet->inet_saddr,
315 inet->inet_daddr,
316 inet->inet_sport,
317 usin->sin_port));
318 WRITE_ONCE(tp->tsoffset,
319 secure_tcp_ts_off(net, inet->inet_saddr,
320 inet->inet_daddr));
321 }
322
323 atomic_set(&inet->inet_id, get_random_u16());
324
325 if (tcp_fastopen_defer_connect(sk, &err))
326 return err;
327 if (err)
328 goto failure;
329
330 err = tcp_connect(sk);
331
332 if (err)
333 goto failure;
334
335 return 0;
336
337failure:
338 /*
339 * This unhashes the socket and releases the local port,
340 * if necessary.
341 */
342 tcp_set_state(sk, TCP_CLOSE);
343 inet_bhash2_reset_saddr(sk);
344 ip_rt_put(rt);
345 sk->sk_route_caps = 0;
346 inet->inet_dport = 0;
347 return err;
348}
349EXPORT_SYMBOL(tcp_v4_connect);
350
351/*
352 * This routine reacts to ICMP_FRAG_NEEDED mtu indications as defined in RFC1191.
353 * It can be called through tcp_release_cb() if the socket was owned by the
354 * user at the time tcp_v4_err() was called to handle the ICMP message.
355 */
356void tcp_v4_mtu_reduced(struct sock *sk)
357{
358 struct inet_sock *inet = inet_sk(sk);
359 struct dst_entry *dst;
360 u32 mtu;
361
362 if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))
363 return;
364 mtu = READ_ONCE(tcp_sk(sk)->mtu_info);
365 dst = inet_csk_update_pmtu(sk, mtu);
366 if (!dst)
367 return;
368
369 /* Something is about to go wrong... Remember the soft error
370 * in case this connection is not able to recover.
371 */
372 if (mtu < dst_mtu(dst) && ip_dont_fragment(sk, dst))
373 WRITE_ONCE(sk->sk_err_soft, EMSGSIZE);
374
375 mtu = dst_mtu(dst);
376
377 if (inet->pmtudisc != IP_PMTUDISC_DONT &&
378 ip_sk_accept_pmtu(sk) &&
379 inet_csk(sk)->icsk_pmtu_cookie > mtu) {
380 tcp_sync_mss(sk, mtu);
381
382 /* Resend the TCP packet because it's
383 * clear that the old packet has been
384 * dropped. This is the new "fast" path mtu
385 * discovery.
386 */
387 tcp_simple_retransmit(sk);
388 } /* else let the usual retransmit timer handle it */
389}
390EXPORT_SYMBOL(tcp_v4_mtu_reduced);
391
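/* Handle an ICMP redirect by letting the socket's cached route update itself. */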
392static void do_redirect(struct sk_buff *skb, struct sock *sk)
393{
394 struct dst_entry *dst = __sk_dst_check(sk, 0);
395
396 if (dst)
397 dst->ops->redirect(dst, sk, skb);
398}
399
400
401/* handle ICMP messages on TCP_NEW_SYN_RECV request sockets */
402void tcp_req_err(struct sock *sk, u32 seq, bool abort)
403{
404 struct request_sock *req = inet_reqsk(sk);
405 struct net *net = sock_net(sk);
406
407 /* ICMPs are not backlogged, hence we cannot get
408 * an established socket here.
409 */
410 if (seq != tcp_rsk(req)->snt_isn) {
411 __NET_INC_STATS(net, LINUX_MIB_OUTOFWINDOWICMPS);
412 } else if (abort) {
413 /*
414 * Still in SYN_RECV, just remove it silently.
415 * There is no good way to pass the error to the newly
416 * created socket, and POSIX does not want network
417 * errors returned from accept().
418 */
419 inet_csk_reqsk_queue_drop(req->rsk_listener, req);
420 tcp_listendrop(req->rsk_listener);
421 }
422 reqsk_put(req);
423}
424EXPORT_SYMBOL(tcp_req_err);
425
426/* TCP-LD (RFC 6069) logic */
427void tcp_ld_RTO_revert(struct sock *sk, u32 seq)
428{
429 struct inet_connection_sock *icsk = inet_csk(sk);
430 struct tcp_sock *tp = tcp_sk(sk);
431 struct sk_buff *skb;
432 s32 remaining;
433 u32 delta_us;
434
435 if (sock_owned_by_user(sk))
436 return;
437
438 if (seq != tp->snd_una || !icsk->icsk_retransmits ||
439 !icsk->icsk_backoff)
440 return;
441
442 skb = tcp_rtx_queue_head(sk);
443 if (WARN_ON_ONCE(!skb))
444 return;
445
446 icsk->icsk_backoff--;
447 icsk->icsk_rto = tp->srtt_us ? __tcp_set_rto(tp) : TCP_TIMEOUT_INIT;
448 icsk->icsk_rto = inet_csk_rto_backoff(icsk, TCP_RTO_MAX);
449
450 tcp_mstamp_refresh(tp);
451 delta_us = (u32)(tp->tcp_mstamp - tcp_skb_timestamp_us(skb));
452 remaining = icsk->icsk_rto - usecs_to_jiffies(delta_us);
453
454 if (remaining > 0) {
455 inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
456 remaining, TCP_RTO_MAX);
457 } else {
458 /* RTO revert clocked out retransmission.
459 * Will retransmit now.
460 */
461 tcp_retransmit_timer(sk);
462 }
463}
464EXPORT_SYMBOL(tcp_ld_RTO_revert);
465
466/*
467 * This routine is called by the ICMP module when it gets some
468 * sort of error condition. If err < 0 then the socket should
469 * be closed and the error returned to the user. If err > 0
470 * it's just the icmp type << 8 | icmp code. After adjustment, the
471 * header points to the first 8 bytes of the TCP header. We need
472 * to find the appropriate port.
473 *
474 * The locking strategy used here is very "optimistic". When
475 * someone else accesses the socket the ICMP is just dropped
476 * and for some paths there is no check at all.
477 * A more general error queue to queue errors for later handling
478 * is probably better.
479 *
480 */
481
482int tcp_v4_err(struct sk_buff *skb, u32 info)
483{
484 const struct iphdr *iph = (const struct iphdr *)skb->data;
485 struct tcphdr *th = (struct tcphdr *)(skb->data + (iph->ihl << 2));
486 struct tcp_sock *tp;
487 const int type = icmp_hdr(skb)->type;
488 const int code = icmp_hdr(skb)->code;
489 struct sock *sk;
490 struct request_sock *fastopen;
491 u32 seq, snd_una;
492 int err;
493 struct net *net = dev_net(skb->dev);
494
495 sk = __inet_lookup_established(net, net->ipv4.tcp_death_row.hashinfo,
496 iph->daddr, th->dest, iph->saddr,
497 ntohs(th->source), inet_iif(skb), 0);
498 if (!sk) {
499 __ICMP_INC_STATS(net, ICMP_MIB_INERRORS);
500 return -ENOENT;
501 }
502 if (sk->sk_state == TCP_TIME_WAIT) {
503 /* To increase the counter of ignored icmps for TCP-AO */
504 tcp_ao_ignore_icmp(sk, AF_INET, type, code);
505 inet_twsk_put(inet_twsk(sk));
506 return 0;
507 }
508 seq = ntohl(th->seq);
509 if (sk->sk_state == TCP_NEW_SYN_RECV) {
510 tcp_req_err(sk, seq, type == ICMP_PARAMETERPROB ||
511 type == ICMP_TIME_EXCEEDED ||
512 (type == ICMP_DEST_UNREACH &&
513 (code == ICMP_NET_UNREACH ||
514 code == ICMP_HOST_UNREACH)));
515 return 0;
516 }
517
518 if (tcp_ao_ignore_icmp(sk, AF_INET, type, code)) {
519 sock_put(sk);
520 return 0;
521 }
522
523 bh_lock_sock(sk);
524 /* If too many ICMPs get dropped on busy
525 * servers this needs to be solved differently.
526 * We do take care of the PMTU discovery (RFC1191) special case:
527 * we can receive locally generated ICMP messages while the socket is held.
528 */
529 if (sock_owned_by_user(sk)) {
530 if (!(type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED))
531 __NET_INC_STATS(net, LINUX_MIB_LOCKDROPPEDICMPS);
532 }
533 if (sk->sk_state == TCP_CLOSE)
534 goto out;
535
536 if (static_branch_unlikely(&ip4_min_ttl)) {
537 /* min_ttl can be changed concurrently from do_ip_setsockopt() */
538 if (unlikely(iph->ttl < READ_ONCE(inet_sk(sk)->min_ttl))) {
539 __NET_INC_STATS(net, LINUX_MIB_TCPMINTTLDROP);
540 goto out;
541 }
542 }
543
544 tp = tcp_sk(sk);
545 /* XXX (TFO) - tp->snd_una should be ISN (tcp_create_openreq_child()) */
546 fastopen = rcu_dereference(tp->fastopen_rsk);
547 snd_una = fastopen ? tcp_rsk(fastopen)->snt_isn : tp->snd_una;
548 if (sk->sk_state != TCP_LISTEN &&
549 !between(seq, snd_una, tp->snd_nxt)) {
550 __NET_INC_STATS(net, LINUX_MIB_OUTOFWINDOWICMPS);
551 goto out;
552 }
553
554 switch (type) {
555 case ICMP_REDIRECT:
556 if (!sock_owned_by_user(sk))
557 do_redirect(skb, sk);
558 goto out;
559 case ICMP_SOURCE_QUENCH:
560 /* Just silently ignore these. */
561 goto out;
562 case ICMP_PARAMETERPROB:
563 err = EPROTO;
564 break;
565 case ICMP_DEST_UNREACH:
566 if (code > NR_ICMP_UNREACH)
567 goto out;
568
569 if (code == ICMP_FRAG_NEEDED) { /* PMTU discovery (RFC1191) */
570 /* We are not interested in TCP_LISTEN and open_requests
571 * (SYN-ACKs sent out by Linux are always < 576 bytes so
572 * they should go through unfragmented).
573 */
574 if (sk->sk_state == TCP_LISTEN)
575 goto out;
576
577 WRITE_ONCE(tp->mtu_info, info);
578 if (!sock_owned_by_user(sk)) {
579 tcp_v4_mtu_reduced(sk);
580 } else {
581 if (!test_and_set_bit(TCP_MTU_REDUCED_DEFERRED, &sk->sk_tsq_flags))
582 sock_hold(sk);
583 }
584 goto out;
585 }
586
587 err = icmp_err_convert[code].errno;
588 /* check if this ICMP message allows revert of backoff.
589 * (see RFC 6069)
590 */
591 if (!fastopen &&
592 (code == ICMP_NET_UNREACH || code == ICMP_HOST_UNREACH))
593 tcp_ld_RTO_revert(sk, seq);
594 break;
595 case ICMP_TIME_EXCEEDED:
596 err = EHOSTUNREACH;
597 break;
598 default:
599 goto out;
600 }
601
602 switch (sk->sk_state) {
603 case TCP_SYN_SENT:
604 case TCP_SYN_RECV:
605 /* Only in fast or simultaneous open. If a fast open socket is
606 * already accepted it is treated as a connected one below.
607 */
608 if (fastopen && !fastopen->sk)
609 break;
610
611 ip_icmp_error(sk, skb, err, th->dest, info, (u8 *)th);
612
613 if (!sock_owned_by_user(sk)) {
614 WRITE_ONCE(sk->sk_err, err);
615
616 sk_error_report(sk);
617
618 tcp_done(sk);
619 } else {
620 WRITE_ONCE(sk->sk_err_soft, err);
621 }
622 goto out;
623 }
624
625 /* If we've already connected we will keep trying
626 * until we time out, or the user gives up.
627 *
628 * RFC 1122 4.2.3.9 allows only PROTO_UNREACH and PORT_UNREACH to be
629 * considered hard errors (well, FRAG_FAILED too, but it is obsoleted
630 * by PMTU discovery).
631 *
632 * Note that in the modern internet, where routing is unreliable and
633 * broken firewalls sit in every dark corner sending random errors as
634 * ordered by their masters, even these two messages finally lose their
635 * original meaning (even Linux sends invalid PORT_UNREACHs).
636 *
637 * Now we are in compliance with RFCs.
638 * --ANK (980905)
639 */
640
641 if (!sock_owned_by_user(sk) &&
642 inet_test_bit(RECVERR, sk)) {
643 WRITE_ONCE(sk->sk_err, err);
644 sk_error_report(sk);
645 } else { /* Only an error on timeout */
646 WRITE_ONCE(sk->sk_err_soft, err);
647 }
648
649out:
650 bh_unlock_sock(sk);
651 sock_put(sk);
652 return 0;
653}
654
655void __tcp_v4_send_check(struct sk_buff *skb, __be32 saddr, __be32 daddr)
656{
657 struct tcphdr *th = tcp_hdr(skb);
658
659 th->check = ~tcp_v4_check(skb->len, saddr, daddr, 0);
660 skb->csum_start = skb_transport_header(skb) - skb->head;
661 skb->csum_offset = offsetof(struct tcphdr, check);
662}
663
664/* This routine computes an IPv4 TCP checksum. */
665void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb)
666{
667 const struct inet_sock *inet = inet_sk(sk);
668
669 __tcp_v4_send_check(skb, inet->inet_saddr, inet->inet_daddr);
670}
671EXPORT_SYMBOL(tcp_v4_send_check);
672
673#define REPLY_OPTIONS_LEN (MAX_TCP_OPTION_SPACE / sizeof(__be32))
674
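/* Append a TCP-AO signature option to an outgoing RST. Returns true if the
 * RST must be dropped (no usable key or signing failed), false on success.
 */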
675static bool tcp_v4_ao_sign_reset(const struct sock *sk, struct sk_buff *skb,
676 const struct tcp_ao_hdr *aoh,
677 struct ip_reply_arg *arg, struct tcphdr *reply,
678 __be32 reply_options[REPLY_OPTIONS_LEN])
679{
680#ifdef CONFIG_TCP_AO
681 int sdif = tcp_v4_sdif(skb);
682 int dif = inet_iif(skb);
683 int l3index = sdif ? dif : 0;
684 bool allocated_traffic_key;
685 struct tcp_ao_key *key;
686 char *traffic_key;
687 bool drop = true;
688 u32 ao_sne = 0;
689 u8 keyid;
690
691 rcu_read_lock();
692 if (tcp_ao_prepare_reset(sk, skb, aoh, l3index, ntohl(reply->seq),
693 &key, &traffic_key, &allocated_traffic_key,
694 &keyid, &ao_sne))
695 goto out;
696
697 reply_options[0] = htonl((TCPOPT_AO << 24) | (tcp_ao_len(key) << 16) |
698 (aoh->rnext_keyid << 8) | keyid);
699 arg->iov[0].iov_len += tcp_ao_len_aligned(key);
700 reply->doff = arg->iov[0].iov_len / 4;
701
702 if (tcp_ao_hash_hdr(AF_INET, (char *)&reply_options[1],
703 key, traffic_key,
704 (union tcp_ao_addr *)&ip_hdr(skb)->saddr,
705 (union tcp_ao_addr *)&ip_hdr(skb)->daddr,
706 reply, ao_sne))
707 goto out;
708 drop = false;
709out:
710 rcu_read_unlock();
711 if (allocated_traffic_key)
712 kfree(traffic_key);
713 return drop;
714#else
715 return true;
716#endif
717}
718
719/*
720 * This routine will send an RST to the other TCP.
721 *
722 * Someone asks: why do I NEVER use socket parameters (TOS, TTL, etc.)
723 * for the reset?
724 * Answer: if a packet caused an RST, it is not for a socket
725 * existing in our system; if it is matched to a socket,
726 * it is just a duplicate segment or a bug in the other side's TCP.
727 * So we build the reply based only on the parameters
728 * that arrived with the segment.
729 * Exception: precedence violation. We do not implement it in any case.
730 */
731
732static void tcp_v4_send_reset(const struct sock *sk, struct sk_buff *skb)
733{
734 const struct tcphdr *th = tcp_hdr(skb);
735 struct {
736 struct tcphdr th;
737 __be32 opt[REPLY_OPTIONS_LEN];
738 } rep;
739 const __u8 *md5_hash_location = NULL;
740 const struct tcp_ao_hdr *aoh;
741 struct ip_reply_arg arg;
742#ifdef CONFIG_TCP_MD5SIG
743 struct tcp_md5sig_key *key = NULL;
744 unsigned char newhash[16];
745 struct sock *sk1 = NULL;
746 int genhash;
747#endif
748 u64 transmit_time = 0;
749 struct sock *ctl_sk;
750 struct net *net;
751 u32 txhash = 0;
752
753 /* Never send a reset in response to a reset. */
754 if (th->rst)
755 return;
756
757 /* If sk is not NULL, it means we did a successful lookup and the incoming
758 * route had to be correct. The prequeue might have dropped our dst.
759 */
760 if (!sk && skb_rtable(skb)->rt_type != RTN_LOCAL)
761 return;
762
763 /* Swap the send and the receive. */
764 memset(&rep, 0, sizeof(rep));
765 rep.th.dest = th->source;
766 rep.th.source = th->dest;
767 rep.th.doff = sizeof(struct tcphdr) / 4;
768 rep.th.rst = 1;
769
770 if (th->ack) {
771 rep.th.seq = th->ack_seq;
772 } else {
773 rep.th.ack = 1;
774 rep.th.ack_seq = htonl(ntohl(th->seq) + th->syn + th->fin +
775 skb->len - (th->doff << 2));
776 }
777
778 memset(&arg, 0, sizeof(arg));
779 arg.iov[0].iov_base = (unsigned char *)&rep;
780 arg.iov[0].iov_len = sizeof(rep.th);
781
782 net = sk ? sock_net(sk) : dev_net(skb_dst(skb)->dev);
783
784 /* Invalid TCP option size or twice included auth */
785 if (tcp_parse_auth_options(tcp_hdr(skb), &md5_hash_location, &aoh))
786 return;
787
788 if (aoh && tcp_v4_ao_sign_reset(sk, skb, aoh, &arg, &rep.th, rep.opt))
789 return;
790
791#ifdef CONFIG_TCP_MD5SIG
792 rcu_read_lock();
793 if (sk && sk_fullsock(sk)) {
794 const union tcp_md5_addr *addr;
795 int l3index;
796
797 /* sdif set means the packet ingressed via a device
798 * in an L3 domain, and inet_iif is set to it.
799 */
800 l3index = tcp_v4_sdif(skb) ? inet_iif(skb) : 0;
801 addr = (union tcp_md5_addr *)&ip_hdr(skb)->saddr;
802 key = tcp_md5_do_lookup(sk, l3index, addr, AF_INET);
803 } else if (md5_hash_location) {
804 const union tcp_md5_addr *addr;
805 int sdif = tcp_v4_sdif(skb);
806 int dif = inet_iif(skb);
807 int l3index;
808
809 /*
810 * The active side is lost. Try to find the listening socket through
811 * the source port, and then find the MD5 key through the listening
812 * socket. We are not loosening security here:
813 * the incoming packet is checked against the MD5 hash of the found key;
814 * no RST is generated if the MD5 hash doesn't match.
815 */
816 sk1 = __inet_lookup_listener(net, net->ipv4.tcp_death_row.hashinfo,
817 NULL, 0, ip_hdr(skb)->saddr,
818 th->source, ip_hdr(skb)->daddr,
819 ntohs(th->source), dif, sdif);
820 /* don't send an RST if we can't find a key */
821 if (!sk1)
822 goto out;
823
824 /* sdif set means the packet ingressed via a device
825 * in an L3 domain, and dif is set to it.
826 */
827 l3index = sdif ? dif : 0;
828 addr = (union tcp_md5_addr *)&ip_hdr(skb)->saddr;
829 key = tcp_md5_do_lookup(sk1, l3index, addr, AF_INET);
830 if (!key)
831 goto out;
832
833
834 genhash = tcp_v4_md5_hash_skb(newhash, key, NULL, skb);
835 if (genhash || memcmp(md5_hash_location, newhash, 16) != 0)
836 goto out;
837
838 }
839
840 if (key) {
841 rep.opt[0] = htonl((TCPOPT_NOP << 24) |
842 (TCPOPT_NOP << 16) |
843 (TCPOPT_MD5SIG << 8) |
844 TCPOLEN_MD5SIG);
845 /* Update length and the length the header thinks exists */
846 arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED;
847 rep.th.doff = arg.iov[0].iov_len / 4;
848
849 tcp_v4_md5_hash_hdr((__u8 *) &rep.opt[1],
850 key, ip_hdr(skb)->saddr,
851 ip_hdr(skb)->daddr, &rep.th);
852 }
853#endif
854 /* Can't co-exist with TCPMD5, hence check rep.opt[0] */
855 if (rep.opt[0] == 0) {
856 __be32 mrst = mptcp_reset_option(skb);
857
858 if (mrst) {
859 rep.opt[0] = mrst;
860 arg.iov[0].iov_len += sizeof(mrst);
861 rep.th.doff = arg.iov[0].iov_len / 4;
862 }
863 }
864
865 arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr,
866 ip_hdr(skb)->saddr, /* XXX */
867 arg.iov[0].iov_len, IPPROTO_TCP, 0);
868 arg.csumoffset = offsetof(struct tcphdr, check) / 2;
869 arg.flags = (sk && inet_sk_transparent(sk)) ? IP_REPLY_ARG_NOSRCCHECK : 0;
870
871 /* When the socket is gone, all binding information is lost and
872 * routing might fail in this case. No choice here: if we force the
873 * input interface, we will misroute in the case of an asymmetric route.
874 */
875 if (sk) {
876 arg.bound_dev_if = sk->sk_bound_dev_if;
877 if (sk_fullsock(sk))
878 trace_tcp_send_reset(sk, skb);
879 }
880
881 BUILD_BUG_ON(offsetof(struct sock, sk_bound_dev_if) !=
882 offsetof(struct inet_timewait_sock, tw_bound_dev_if));
883
884 arg.tos = ip_hdr(skb)->tos;
885 arg.uid = sock_net_uid(net, sk && sk_fullsock(sk) ? sk : NULL);
886 local_bh_disable();
887 ctl_sk = this_cpu_read(ipv4_tcp_sk);
888 sock_net_set(ctl_sk, net);
889 if (sk) {
890 ctl_sk->sk_mark = (sk->sk_state == TCP_TIME_WAIT) ?
891 inet_twsk(sk)->tw_mark : sk->sk_mark;
892 ctl_sk->sk_priority = (sk->sk_state == TCP_TIME_WAIT) ?
893 inet_twsk(sk)->tw_priority : READ_ONCE(sk->sk_priority);
894 transmit_time = tcp_transmit_time(sk);
895 xfrm_sk_clone_policy(ctl_sk, sk);
896 txhash = (sk->sk_state == TCP_TIME_WAIT) ?
897 inet_twsk(sk)->tw_txhash : sk->sk_txhash;
898 } else {
899 ctl_sk->sk_mark = 0;
900 ctl_sk->sk_priority = 0;
901 }
902 ip_send_unicast_reply(ctl_sk,
903 skb, &TCP_SKB_CB(skb)->header.h4.opt,
904 ip_hdr(skb)->saddr, ip_hdr(skb)->daddr,
905 &arg, arg.iov[0].iov_len,
906 transmit_time, txhash);
907
908 xfrm_sk_free_policy(ctl_sk);
909 sock_net_set(ctl_sk, &init_net);
910 __TCP_INC_STATS(net, TCP_MIB_OUTSEGS);
911 __TCP_INC_STATS(net, TCP_MIB_OUTRSTS);
912 local_bh_enable();
913
914#ifdef CONFIG_TCP_MD5SIG
915out:
916 rcu_read_unlock();
917#endif
918}
919
920/* The code below, which sends ACKs in SYN-RECV and TIME-WAIT states
921 outside of socket context, is certainly ugly. What can I do?
922 */
923
924static void tcp_v4_send_ack(const struct sock *sk,
925 struct sk_buff *skb, u32 seq, u32 ack,
926 u32 win, u32 tsval, u32 tsecr, int oif,
927 struct tcp_key *key,
928 int reply_flags, u8 tos, u32 txhash)
929{
930 const struct tcphdr *th = tcp_hdr(skb);
931 struct {
932 struct tcphdr th;
933 __be32 opt[(MAX_TCP_OPTION_SPACE >> 2)];
934 } rep;
935 struct net *net = sock_net(sk);
936 struct ip_reply_arg arg;
937 struct sock *ctl_sk;
938 u64 transmit_time;
939
940 memset(&rep.th, 0, sizeof(struct tcphdr));
941 memset(&arg, 0, sizeof(arg));
942
943 arg.iov[0].iov_base = (unsigned char *)&rep;
944 arg.iov[0].iov_len = sizeof(rep.th);
945 if (tsecr) {
946 rep.opt[0] = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
947 (TCPOPT_TIMESTAMP << 8) |
948 TCPOLEN_TIMESTAMP);
949 rep.opt[1] = htonl(tsval);
950 rep.opt[2] = htonl(tsecr);
951 arg.iov[0].iov_len += TCPOLEN_TSTAMP_ALIGNED;
952 }
953
954 /* Swap the send and the receive. */
955 rep.th.dest = th->source;
956 rep.th.source = th->dest;
957 rep.th.doff = arg.iov[0].iov_len / 4;
958 rep.th.seq = htonl(seq);
959 rep.th.ack_seq = htonl(ack);
960 rep.th.ack = 1;
961 rep.th.window = htons(win);
962
963#ifdef CONFIG_TCP_MD5SIG
964 if (tcp_key_is_md5(key)) {
965 int offset = (tsecr) ? 3 : 0;
966
967 rep.opt[offset++] = htonl((TCPOPT_NOP << 24) |
968 (TCPOPT_NOP << 16) |
969 (TCPOPT_MD5SIG << 8) |
970 TCPOLEN_MD5SIG);
971 arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED;
972 rep.th.doff = arg.iov[0].iov_len/4;
973
974 tcp_v4_md5_hash_hdr((__u8 *) &rep.opt[offset],
975 key->md5_key, ip_hdr(skb)->saddr,
976 ip_hdr(skb)->daddr, &rep.th);
977 }
978#endif
979#ifdef CONFIG_TCP_AO
980 if (tcp_key_is_ao(key)) {
981 int offset = (tsecr) ? 3 : 0;
982
983 rep.opt[offset++] = htonl((TCPOPT_AO << 24) |
984 (tcp_ao_len(key->ao_key) << 16) |
985 (key->ao_key->sndid << 8) |
986 key->rcv_next);
987 arg.iov[0].iov_len += tcp_ao_len_aligned(key->ao_key);
988 rep.th.doff = arg.iov[0].iov_len / 4;
989
990 tcp_ao_hash_hdr(AF_INET, (char *)&rep.opt[offset],
991 key->ao_key, key->traffic_key,
992 (union tcp_ao_addr *)&ip_hdr(skb)->saddr,
993 (union tcp_ao_addr *)&ip_hdr(skb)->daddr,
994 &rep.th, key->sne);
995 }
996#endif
997 arg.flags = reply_flags;
998 arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr,
999 ip_hdr(skb)->saddr, /* XXX */
1000 arg.iov[0].iov_len, IPPROTO_TCP, 0);
1001 arg.csumoffset = offsetof(struct tcphdr, check) / 2;
1002 if (oif)
1003 arg.bound_dev_if = oif;
1004 arg.tos = tos;
1005 arg.uid = sock_net_uid(net, sk_fullsock(sk) ? sk : NULL);
1006 local_bh_disable();
1007 ctl_sk = this_cpu_read(ipv4_tcp_sk);
1008 sock_net_set(ctl_sk, net);
1009 ctl_sk->sk_mark = (sk->sk_state == TCP_TIME_WAIT) ?
1010 inet_twsk(sk)->tw_mark : READ_ONCE(sk->sk_mark);
1011 ctl_sk->sk_priority = (sk->sk_state == TCP_TIME_WAIT) ?
1012 inet_twsk(sk)->tw_priority : READ_ONCE(sk->sk_priority);
1013 transmit_time = tcp_transmit_time(sk);
1014 ip_send_unicast_reply(ctl_sk,
1015 skb, &TCP_SKB_CB(skb)->header.h4.opt,
1016 ip_hdr(skb)->saddr, ip_hdr(skb)->daddr,
1017 &arg, arg.iov[0].iov_len,
1018 transmit_time, txhash);
1019
1020 sock_net_set(ctl_sk, &init_net);
1021 __TCP_INC_STATS(net, TCP_MIB_OUTSEGS);
1022 local_bh_enable();
1023}
1024
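/* ACK an incoming segment on behalf of a TIME-WAIT socket, echoing the
 * last advertised sequence numbers and window, then drop the timewait
 * reference taken by the caller's lookup.
 */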
1025static void tcp_v4_timewait_ack(struct sock *sk, struct sk_buff *skb)
1026{
1027 struct inet_timewait_sock *tw = inet_twsk(sk);
1028 struct tcp_timewait_sock *tcptw = tcp_twsk(sk);
1029 struct tcp_key key = {};
1030#ifdef CONFIG_TCP_AO
1031 struct tcp_ao_info *ao_info;
1032
1033 if (static_branch_unlikely(&tcp_ao_needed.key)) {
1034 /* FIXME: the segment to-be-acked is not verified yet */
1035 ao_info = rcu_dereference(tcptw->ao_info);
1036 if (ao_info) {
1037 const struct tcp_ao_hdr *aoh;
1038
1039 if (tcp_parse_auth_options(tcp_hdr(skb), NULL, &aoh)) {
1040 inet_twsk_put(tw);
1041 return;
1042 }
1043
1044 if (aoh)
1045 key.ao_key = tcp_ao_established_key(ao_info, aoh->rnext_keyid, -1);
1046 }
1047 }
1048 if (key.ao_key) {
1049 struct tcp_ao_key *rnext_key;
1050
1051 key.traffic_key = snd_other_key(key.ao_key);
1052 key.sne = READ_ONCE(ao_info->snd_sne);
1053 rnext_key = READ_ONCE(ao_info->rnext_key);
1054 key.rcv_next = rnext_key->rcvid;
1055 key.type = TCP_KEY_AO;
1056#else
1057 if (0) {
1058#endif
1059#ifdef CONFIG_TCP_MD5SIG
1060 } else if (static_branch_unlikely(&tcp_md5_needed.key)) {
1061 key.md5_key = tcp_twsk_md5_key(tcptw);
1062 if (key.md5_key)
1063 key.type = TCP_KEY_MD5;
1064#endif
1065 }
1066
1067 tcp_v4_send_ack(sk, skb,
1068 tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
1069 tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
1070 tcp_tw_tsval(tcptw),
1071 tcptw->tw_ts_recent,
1072 tw->tw_bound_dev_if, &key,
1073 tw->tw_transparent ? IP_REPLY_ARG_NOSRCCHECK : 0,
1074 tw->tw_tos,
1075 tw->tw_txhash);
1076
1077 inet_twsk_put(tw);
1078}
1079
1080static void tcp_v4_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
1081 struct request_sock *req)
1082{
1083 struct tcp_key key = {};
1084
1085 /* sk->sk_state == TCP_LISTEN -> for regular TCP_SYN_RECV
1086 * sk->sk_state == TCP_SYN_RECV -> for Fast Open.
1087 */
1088 u32 seq = (sk->sk_state == TCP_LISTEN) ? tcp_rsk(req)->snt_isn + 1 :
1089 tcp_sk(sk)->snd_nxt;
1090
1091#ifdef CONFIG_TCP_AO
1092 if (static_branch_unlikely(&tcp_ao_needed.key) &&
1093 tcp_rsk_used_ao(req)) {
1094 const union tcp_md5_addr *addr;
1095 const struct tcp_ao_hdr *aoh;
1096 int l3index;
1097
1098 /* Invalid TCP option size or twice included auth */
1099 if (tcp_parse_auth_options(tcp_hdr(skb), NULL, &aoh))
1100 return;
1101 if (!aoh)
1102 return;
1103
1104 addr = (union tcp_md5_addr *)&ip_hdr(skb)->saddr;
1105 l3index = tcp_v4_sdif(skb) ? inet_iif(skb) : 0;
1106 key.ao_key = tcp_ao_do_lookup(sk, l3index, addr, AF_INET,
1107 aoh->rnext_keyid, -1);
1108 if (unlikely(!key.ao_key)) {
1109 /* Send ACK with any matching MKT for the peer */
1110 key.ao_key = tcp_ao_do_lookup(sk, l3index, addr, AF_INET, -1, -1);
1111 /* Matching key disappeared (user removed the key?)
1112 * let the handshake time out.
1113 */
1114 if (!key.ao_key) {
1115 net_info_ratelimited("TCP-AO key for (%pI4, %d)->(%pI4, %d) suddenly disappeared, won't ACK new connection\n",
1116 addr,
1117 ntohs(tcp_hdr(skb)->source),
1118 &ip_hdr(skb)->daddr,
1119 ntohs(tcp_hdr(skb)->dest));
1120 return;
1121 }
1122 }
1123 key.traffic_key = kmalloc(tcp_ao_digest_size(key.ao_key), GFP_ATOMIC);
1124 if (!key.traffic_key)
1125 return;
1126
1127 key.type = TCP_KEY_AO;
1128 key.rcv_next = aoh->keyid;
1129 tcp_v4_ao_calc_key_rsk(key.ao_key, key.traffic_key, req);
1130#else
1131 if (0) {
1132#endif
1133#ifdef CONFIG_TCP_MD5SIG
1134 } else if (static_branch_unlikely(&tcp_md5_needed.key)) {
1135 const union tcp_md5_addr *addr;
1136 int l3index;
1137
1138 addr = (union tcp_md5_addr *)&ip_hdr(skb)->saddr;
1139 l3index = tcp_v4_sdif(skb) ? inet_iif(skb) : 0;
1140 key.md5_key = tcp_md5_do_lookup(sk, l3index, addr, AF_INET);
1141 if (key.md5_key)
1142 key.type = TCP_KEY_MD5;
1143#endif
1144 }
1145
1146 /* RFC 7323 2.3
1147 * The window field (SEG.WND) of every outgoing segment, with the
1148 * exception of <SYN> segments, MUST be right-shifted by
1149 * Rcv.Wind.Shift bits:
1150 */
1151 tcp_v4_send_ack(sk, skb, seq,
1152 tcp_rsk(req)->rcv_nxt,
1153 req->rsk_rcv_wnd >> inet_rsk(req)->rcv_wscale,
1154 tcp_rsk_tsval(tcp_rsk(req)),
1155 READ_ONCE(req->ts_recent),
1156 0, &key,
1157 inet_rsk(req)->no_srccheck ? IP_REPLY_ARG_NOSRCCHECK : 0,
1158 ip_hdr(skb)->tos,
1159 READ_ONCE(tcp_rsk(req)->txhash));
1160 if (tcp_key_is_ao(&key))
1161 kfree(key.traffic_key);
1162}
1163
1164/*
1165 * Send a SYN-ACK after having received a SYN.
1166 * This still operates on a request_sock only, not on a big
1167 * socket.
1168 */
1169static int tcp_v4_send_synack(const struct sock *sk, struct dst_entry *dst,
1170 struct flowi *fl,
1171 struct request_sock *req,
1172 struct tcp_fastopen_cookie *foc,
1173 enum tcp_synack_type synack_type,
1174 struct sk_buff *syn_skb)
1175{
1176 const struct inet_request_sock *ireq = inet_rsk(req);
1177 struct flowi4 fl4;
1178 int err = -1;
1179 struct sk_buff *skb;
1180 u8 tos;
1181
1182 /* First, grab a route. */
1183 if (!dst && (dst = inet_csk_route_req(sk, &fl4, req)) == NULL)
1184 return -1;
1185
1186 skb = tcp_make_synack(sk, dst, req, foc, synack_type, syn_skb);
1187
1188 if (skb) {
1189 __tcp_v4_send_check(skb, ireq->ir_loc_addr, ireq->ir_rmt_addr);
1190
1191 tos = READ_ONCE(inet_sk(sk)->tos);
1192
1193 if (READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_reflect_tos))
1194 tos = (tcp_rsk(req)->syn_tos & ~INET_ECN_MASK) |
1195 (tos & INET_ECN_MASK);
1196
1197 if (!INET_ECN_is_capable(tos) &&
1198 tcp_bpf_ca_needs_ecn((struct sock *)req))
1199 tos |= INET_ECN_ECT_0;
1200
1201 rcu_read_lock();
1202 err = ip_build_and_send_pkt(skb, sk, ireq->ir_loc_addr,
1203 ireq->ir_rmt_addr,
1204 rcu_dereference(ireq->ireq_opt),
1205 tos);
1206 rcu_read_unlock();
1207 err = net_xmit_eval(err);
1208 }
1209
1210 return err;
1211}
1212
1213/*
1214 * IPv4 request_sock destructor.
1215 */
1216static void tcp_v4_reqsk_destructor(struct request_sock *req)
1217{
1218 kfree(rcu_dereference_protected(inet_rsk(req)->ireq_opt, 1));
1219}
1220
1221#ifdef CONFIG_TCP_MD5SIG
1222/*
1223 * RFC2385 MD5 checksumming requires a mapping of
1224 * IP address->MD5 Key.
1225 * We need to maintain these in the sk structure.
1226 */
1227
1228DEFINE_STATIC_KEY_DEFERRED_FALSE(tcp_md5_needed, HZ);
1229EXPORT_SYMBOL(tcp_md5_needed);
1230
1231static bool better_md5_match(struct tcp_md5sig_key *old, struct tcp_md5sig_key *new)
1232{
1233 if (!old)
1234 return true;
1235
1236 /* l3index always overrides non-l3index */
1237 if (old->l3index && new->l3index == 0)
1238 return false;
1239 if (old->l3index == 0 && new->l3index)
1240 return true;
1241
1242 return old->prefixlen < new->prefixlen;
1243}
1244
1245/* Find the Key structure for an address. */
1246struct tcp_md5sig_key *__tcp_md5_do_lookup(const struct sock *sk, int l3index,
1247 const union tcp_md5_addr *addr,
1248 int family, bool any_l3index)
1249{
1250 const struct tcp_sock *tp = tcp_sk(sk);
1251 struct tcp_md5sig_key *key;
1252 const struct tcp_md5sig_info *md5sig;
1253 __be32 mask;
1254 struct tcp_md5sig_key *best_match = NULL;
1255 bool match;
1256
1257 /* caller either holds rcu_read_lock() or socket lock */
1258 md5sig = rcu_dereference_check(tp->md5sig_info,
1259 lockdep_sock_is_held(sk));
1260 if (!md5sig)
1261 return NULL;
1262
1263 hlist_for_each_entry_rcu(key, &md5sig->head, node,
1264 lockdep_sock_is_held(sk)) {
1265 if (key->family != family)
1266 continue;
1267 if (!any_l3index && key->flags & TCP_MD5SIG_FLAG_IFINDEX &&
1268 key->l3index != l3index)
1269 continue;
1270 if (family == AF_INET) {
1271 mask = inet_make_mask(key->prefixlen);
1272 match = (key->addr.a4.s_addr & mask) ==
1273 (addr->a4.s_addr & mask);
1274#if IS_ENABLED(CONFIG_IPV6)
1275 } else if (family == AF_INET6) {
1276 match = ipv6_prefix_equal(&key->addr.a6, &addr->a6,
1277 key->prefixlen);
1278#endif
1279 } else {
1280 match = false;
1281 }
1282
1283 if (match && better_md5_match(best_match, key))
1284 best_match = key;
1285 }
1286 return best_match;
1287}
1288EXPORT_SYMBOL(__tcp_md5_do_lookup);
1289
1290static struct tcp_md5sig_key *tcp_md5_do_lookup_exact(const struct sock *sk,
1291 const union tcp_md5_addr *addr,
1292 int family, u8 prefixlen,
1293 int l3index, u8 flags)
1294{
1295 const struct tcp_sock *tp = tcp_sk(sk);
1296 struct tcp_md5sig_key *key;
1297 unsigned int size = sizeof(struct in_addr);
1298 const struct tcp_md5sig_info *md5sig;
1299
1300 /* caller either holds rcu_read_lock() or socket lock */
1301 md5sig = rcu_dereference_check(tp->md5sig_info,
1302 lockdep_sock_is_held(sk));
1303 if (!md5sig)
1304 return NULL;
1305#if IS_ENABLED(CONFIG_IPV6)
1306 if (family == AF_INET6)
1307 size = sizeof(struct in6_addr);
1308#endif
1309 hlist_for_each_entry_rcu(key, &md5sig->head, node,
1310 lockdep_sock_is_held(sk)) {
1311 if (key->family != family)
1312 continue;
1313 if ((key->flags & TCP_MD5SIG_FLAG_IFINDEX) != (flags & TCP_MD5SIG_FLAG_IFINDEX))
1314 continue;
1315 if (key->l3index != l3index)
1316 continue;
1317 if (!memcmp(&key->addr, addr, size) &&
1318 key->prefixlen == prefixlen)
1319 return key;
1320 }
1321 return NULL;
1322}
1323
1324struct tcp_md5sig_key *tcp_v4_md5_lookup(const struct sock *sk,
1325 const struct sock *addr_sk)
1326{
1327 const union tcp_md5_addr *addr;
1328 int l3index;
1329
1330 l3index = l3mdev_master_ifindex_by_index(sock_net(sk),
1331 addr_sk->sk_bound_dev_if);
1332 addr = (const union tcp_md5_addr *)&addr_sk->sk_daddr;
1333 return tcp_md5_do_lookup(sk, l3index, addr, AF_INET);
1334}
1335EXPORT_SYMBOL(tcp_v4_md5_lookup);
1336
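/* Allocate and publish the per-socket MD5 key list head. GSO is disabled
 * here because segments carrying an MD5 signature have to be signed one by
 * one and cannot be produced by segmentation offload.
 */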
1337static int tcp_md5sig_info_add(struct sock *sk, gfp_t gfp)
1338{
1339 struct tcp_sock *tp = tcp_sk(sk);
1340 struct tcp_md5sig_info *md5sig;
1341
1342 md5sig = kmalloc(sizeof(*md5sig), gfp);
1343 if (!md5sig)
1344 return -ENOMEM;
1345
1346 sk_gso_disable(sk);
1347 INIT_HLIST_HEAD(&md5sig->head);
1348 rcu_assign_pointer(tp->md5sig_info, md5sig);
1349 return 0;
1350}
1351
1352/* This can be called on a newly created socket, from other files */
1353static int __tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr,
1354 int family, u8 prefixlen, int l3index, u8 flags,
1355 const u8 *newkey, u8 newkeylen, gfp_t gfp)
1356{
1357 /* Add Key to the list */
1358 struct tcp_md5sig_key *key;
1359 struct tcp_sock *tp = tcp_sk(sk);
1360 struct tcp_md5sig_info *md5sig;
1361
1362 key = tcp_md5_do_lookup_exact(sk, addr, family, prefixlen, l3index, flags);
1363 if (key) {
1364 /* Pre-existing entry - just update that one.
1365 * Note that the key might be used concurrently.
1366 * data_race() is telling KCSAN that we do not care about
1367 * key mismatches, since changing the MD5 key on live flows
1368 * can lead to packet drops.
1369 */
1370 data_race(memcpy(key->key, newkey, newkeylen));
1371
1372 /* Pairs with READ_ONCE() in tcp_md5_hash_key().
1373 * Also note that a reader could catch the new key->keylen value
1374 * but the old key->key[]; this is the reason we use __GFP_ZERO
1375 * at sock_kmalloc() time below these lines.
1376 */
1377 WRITE_ONCE(key->keylen, newkeylen);
1378
1379 return 0;
1380 }
1381
1382 md5sig = rcu_dereference_protected(tp->md5sig_info,
1383 lockdep_sock_is_held(sk));
1384
1385 key = sock_kmalloc(sk, sizeof(*key), gfp | __GFP_ZERO);
1386 if (!key)
1387 return -ENOMEM;
1388
1389 memcpy(key->key, newkey, newkeylen);
1390 key->keylen = newkeylen;
1391 key->family = family;
1392 key->prefixlen = prefixlen;
1393 key->l3index = l3index;
1394 key->flags = flags;
1395 memcpy(&key->addr, addr,
1396 (IS_ENABLED(CONFIG_IPV6) && family == AF_INET6) ? sizeof(struct in6_addr) :
1397 sizeof(struct in_addr));
1398 hlist_add_head_rcu(&key->node, &md5sig->head);
1399 return 0;
1400}
1401
1402int tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr,
1403 int family, u8 prefixlen, int l3index, u8 flags,
1404 const u8 *newkey, u8 newkeylen)
1405{
1406 struct tcp_sock *tp = tcp_sk(sk);
1407
1408 if (!rcu_dereference_protected(tp->md5sig_info, lockdep_sock_is_held(sk))) {
1409 if (tcp_md5_alloc_sigpool())
1410 return -ENOMEM;
1411
1412 if (tcp_md5sig_info_add(sk, GFP_KERNEL)) {
1413 tcp_md5_release_sigpool();
1414 return -ENOMEM;
1415 }
1416
1417 if (!static_branch_inc(&tcp_md5_needed.key)) {
1418 struct tcp_md5sig_info *md5sig;
1419
1420 md5sig = rcu_dereference_protected(tp->md5sig_info, lockdep_sock_is_held(sk));
1421 rcu_assign_pointer(tp->md5sig_info, NULL);
1422 kfree_rcu(md5sig, rcu);
1423 tcp_md5_release_sigpool();
1424 return -EUSERS;
1425 }
1426 }
1427
1428 return __tcp_md5_do_add(sk, addr, family, prefixlen, l3index, flags,
1429 newkey, newkeylen, GFP_KERNEL);
1430}
1431EXPORT_SYMBOL(tcp_md5_do_add);
1432
1433int tcp_md5_key_copy(struct sock *sk, const union tcp_md5_addr *addr,
1434 int family, u8 prefixlen, int l3index,
1435 struct tcp_md5sig_key *key)
1436{
1437 struct tcp_sock *tp = tcp_sk(sk);
1438
1439 if (!rcu_dereference_protected(tp->md5sig_info, lockdep_sock_is_held(sk))) {
1440 tcp_md5_add_sigpool();
1441
1442 if (tcp_md5sig_info_add(sk, sk_gfp_mask(sk, GFP_ATOMIC))) {
1443 tcp_md5_release_sigpool();
1444 return -ENOMEM;
1445 }
1446
1447 if (!static_key_fast_inc_not_disabled(&tcp_md5_needed.key.key)) {
1448 struct tcp_md5sig_info *md5sig;
1449
1450 md5sig = rcu_dereference_protected(tp->md5sig_info, lockdep_sock_is_held(sk));
1451 net_warn_ratelimited("Too many TCP-MD5 keys in the system\n");
1452 rcu_assign_pointer(tp->md5sig_info, NULL);
1453 kfree_rcu(md5sig, rcu);
1454 tcp_md5_release_sigpool();
1455 return -EUSERS;
1456 }
1457 }
1458
1459 return __tcp_md5_do_add(sk, addr, family, prefixlen, l3index,
1460 key->flags, key->key, key->keylen,
1461 sk_gfp_mask(sk, GFP_ATOMIC));
1462}
1463EXPORT_SYMBOL(tcp_md5_key_copy);
1464
1465int tcp_md5_do_del(struct sock *sk, const union tcp_md5_addr *addr, int family,
1466 u8 prefixlen, int l3index, u8 flags)
1467{
1468 struct tcp_md5sig_key *key;
1469
1470 key = tcp_md5_do_lookup_exact(sk, addr, family, prefixlen, l3index, flags);
1471 if (!key)
1472 return -ENOENT;
1473 hlist_del_rcu(&key->node);
1474 atomic_sub(sizeof(*key), &sk->sk_omem_alloc);
1475 kfree_rcu(key, rcu);
1476 return 0;
1477}
1478EXPORT_SYMBOL(tcp_md5_do_del);
1479
1480void tcp_clear_md5_list(struct sock *sk)
1481{
1482 struct tcp_sock *tp = tcp_sk(sk);
1483 struct tcp_md5sig_key *key;
1484 struct hlist_node *n;
1485 struct tcp_md5sig_info *md5sig;
1486
1487 md5sig = rcu_dereference_protected(tp->md5sig_info, 1);
1488
1489 hlist_for_each_entry_safe(key, n, &md5sig->head, node) {
1490 hlist_del_rcu(&key->node);
1491 atomic_sub(sizeof(*key), &sk->sk_omem_alloc);
1492 kfree_rcu(key, rcu);
1493 }
1494}
1495
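/* Userspace reaches this handler through setsockopt(IPPROTO_TCP, TCP_MD5SIG)
 * or TCP_MD5SIG_EXT. A rough sketch of the expected call (illustrative only,
 * not part of this file; error handling omitted, peer_addr is a placeholder):
 *
 *	struct tcp_md5sig cmd = {};
 *	struct sockaddr_in *sin = (struct sockaddr_in *)&cmd.tcpm_addr;
 *
 *	sin->sin_family = AF_INET;
 *	sin->sin_addr.s_addr = peer_addr;
 *	cmd.tcpm_keylen = strlen("secret");
 *	memcpy(cmd.tcpm_key, "secret", cmd.tcpm_keylen);
 *	setsockopt(fd, IPPROTO_TCP, TCP_MD5SIG, &cmd, sizeof(cmd));
 *
 * A zero tcpm_keylen deletes the key for that peer (see below).
 */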
1496static int tcp_v4_parse_md5_keys(struct sock *sk, int optname,
1497 sockptr_t optval, int optlen)
1498{
1499 struct tcp_md5sig cmd;
1500 struct sockaddr_in *sin = (struct sockaddr_in *)&cmd.tcpm_addr;
1501 const union tcp_md5_addr *addr;
1502 u8 prefixlen = 32;
1503 int l3index = 0;
1504 bool l3flag;
1505 u8 flags;
1506
1507 if (optlen < sizeof(cmd))
1508 return -EINVAL;
1509
1510 if (copy_from_sockptr(&cmd, optval, sizeof(cmd)))
1511 return -EFAULT;
1512
1513 if (sin->sin_family != AF_INET)
1514 return -EINVAL;
1515
1516 flags = cmd.tcpm_flags & TCP_MD5SIG_FLAG_IFINDEX;
1517 l3flag = cmd.tcpm_flags & TCP_MD5SIG_FLAG_IFINDEX;
1518
1519 if (optname == TCP_MD5SIG_EXT &&
1520 cmd.tcpm_flags & TCP_MD5SIG_FLAG_PREFIX) {
1521 prefixlen = cmd.tcpm_prefixlen;
1522 if (prefixlen > 32)
1523 return -EINVAL;
1524 }
1525
1526 if (optname == TCP_MD5SIG_EXT && cmd.tcpm_ifindex &&
1527 cmd.tcpm_flags & TCP_MD5SIG_FLAG_IFINDEX) {
1528 struct net_device *dev;
1529
1530 rcu_read_lock();
1531 dev = dev_get_by_index_rcu(sock_net(sk), cmd.tcpm_ifindex);
1532 if (dev && netif_is_l3_master(dev))
1533 l3index = dev->ifindex;
1534
1535 rcu_read_unlock();
1536
1537 /* ok to reference set/not set outside of rcu;
1538 * right now device MUST be an L3 master
1539 */
1540 if (!dev || !l3index)
1541 return -EINVAL;
1542 }
1543
1544 addr = (union tcp_md5_addr *)&sin->sin_addr.s_addr;
1545
1546 if (!cmd.tcpm_keylen)
1547 return tcp_md5_do_del(sk, addr, AF_INET, prefixlen, l3index, flags);
1548
1549 if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
1550 return -EINVAL;
1551
1552 /* Don't allow keys for peers that have a matching TCP-AO key.
1553 * See the comment in tcp_ao_add_cmd()
1554 */
1555 if (tcp_ao_required(sk, addr, AF_INET, l3flag ? l3index : -1, false))
1556 return -EKEYREJECTED;
1557
1558 return tcp_md5_do_add(sk, addr, AF_INET, prefixlen, l3index, flags,
1559 cmd.tcpm_key, cmd.tcpm_keylen);
1560}
1561
1562static int tcp_v4_md5_hash_headers(struct tcp_sigpool *hp,
1563 __be32 daddr, __be32 saddr,
1564 const struct tcphdr *th, int nbytes)
1565{
1566 struct tcp4_pseudohdr *bp;
1567 struct scatterlist sg;
1568 struct tcphdr *_th;
1569
1570 bp = hp->scratch;
1571 bp->saddr = saddr;
1572 bp->daddr = daddr;
1573 bp->pad = 0;
1574 bp->protocol = IPPROTO_TCP;
1575 bp->len = cpu_to_be16(nbytes);
1576
1577 _th = (struct tcphdr *)(bp + 1);
1578 memcpy(_th, th, sizeof(*th));
1579 _th->check = 0;
1580
1581 sg_init_one(&sg, bp, sizeof(*bp) + sizeof(*th));
1582 ahash_request_set_crypt(hp->req, &sg, NULL,
1583 sizeof(*bp) + sizeof(*th));
1584 return crypto_ahash_update(hp->req);
1585}
1586
1587static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
1588 __be32 daddr, __be32 saddr, const struct tcphdr *th)
1589{
1590 struct tcp_sigpool hp;
1591
1592 if (tcp_sigpool_start(tcp_md5_sigpool_id, &hp))
1593 goto clear_hash_nostart;
1594
1595 if (crypto_ahash_init(hp.req))
1596 goto clear_hash;
1597 if (tcp_v4_md5_hash_headers(&hp, daddr, saddr, th, th->doff << 2))
1598 goto clear_hash;
1599 if (tcp_md5_hash_key(&hp, key))
1600 goto clear_hash;
1601 ahash_request_set_crypt(hp.req, NULL, md5_hash, 0);
1602 if (crypto_ahash_final(hp.req))
1603 goto clear_hash;
1604
1605 tcp_sigpool_end(&hp);
1606 return 0;
1607
1608clear_hash:
1609 tcp_sigpool_end(&hp);
1610clear_hash_nostart:
1611 memset(md5_hash, 0, 16);
1612 return 1;
1613}
1614
1615int tcp_v4_md5_hash_skb(char *md5_hash, const struct tcp_md5sig_key *key,
1616 const struct sock *sk,
1617 const struct sk_buff *skb)
1618{
1619 const struct tcphdr *th = tcp_hdr(skb);
1620 struct tcp_sigpool hp;
1621 __be32 saddr, daddr;
1622
1623 if (sk) { /* valid for establish/request sockets */
1624 saddr = sk->sk_rcv_saddr;
1625 daddr = sk->sk_daddr;
1626 } else {
1627 const struct iphdr *iph = ip_hdr(skb);
1628 saddr = iph->saddr;
1629 daddr = iph->daddr;
1630 }
1631
1632 if (tcp_sigpool_start(tcp_md5_sigpool_id, &hp))
1633 goto clear_hash_nostart;
1634
1635 if (crypto_ahash_init(hp.req))
1636 goto clear_hash;
1637
1638 if (tcp_v4_md5_hash_headers(&hp, daddr, saddr, th, skb->len))
1639 goto clear_hash;
1640 if (tcp_sigpool_hash_skb_data(&hp, skb, th->doff << 2))
1641 goto clear_hash;
1642 if (tcp_md5_hash_key(&hp, key))
1643 goto clear_hash;
1644 ahash_request_set_crypt(hp.req, NULL, md5_hash, 0);
1645 if (crypto_ahash_final(hp.req))
1646 goto clear_hash;
1647
1648 tcp_sigpool_end(&hp);
1649 return 0;
1650
1651clear_hash:
1652 tcp_sigpool_end(&hp);
1653clear_hash_nostart:
1654 memset(md5_hash, 0, 16);
1655 return 1;
1656}
1657EXPORT_SYMBOL(tcp_v4_md5_hash_skb);
1658
1659#endif
1660
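/* Fill in the IPv4-specific part of a freshly created request sock from the
 * incoming SYN: addresses are swapped (the SYN's source becomes our peer)
 * and any received IP options are saved for the eventual child socket.
 */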
1661static void tcp_v4_init_req(struct request_sock *req,
1662 const struct sock *sk_listener,
1663 struct sk_buff *skb)
1664{
1665 struct inet_request_sock *ireq = inet_rsk(req);
1666 struct net *net = sock_net(sk_listener);
1667
1668 sk_rcv_saddr_set(req_to_sk(req), ip_hdr(skb)->daddr);
1669 sk_daddr_set(req_to_sk(req), ip_hdr(skb)->saddr);
1670 RCU_INIT_POINTER(ireq->ireq_opt, tcp_v4_save_options(net, skb));
1671}
1672
1673static struct dst_entry *tcp_v4_route_req(const struct sock *sk,
1674 struct sk_buff *skb,
1675 struct flowi *fl,
1676 struct request_sock *req)
1677{
1678 tcp_v4_init_req(req, sk, skb);
1679
1680 if (security_inet_conn_request(sk, skb, req))
1681 return NULL;
1682
1683 return inet_csk_route_req(sk, &fl->u.ip4, req);
1684}
1685
1686struct request_sock_ops tcp_request_sock_ops __read_mostly = {
1687 .family = PF_INET,
1688 .obj_size = sizeof(struct tcp_request_sock),
1689 .rtx_syn_ack = tcp_rtx_synack,
1690 .send_ack = tcp_v4_reqsk_send_ack,
1691 .destructor = tcp_v4_reqsk_destructor,
1692 .send_reset = tcp_v4_send_reset,
1693 .syn_ack_timeout = tcp_syn_ack_timeout,
1694};
1695
1696const struct tcp_request_sock_ops tcp_request_sock_ipv4_ops = {
1697 .mss_clamp = TCP_MSS_DEFAULT,
1698#ifdef CONFIG_TCP_MD5SIG
1699 .req_md5_lookup = tcp_v4_md5_lookup,
1700 .calc_md5_hash = tcp_v4_md5_hash_skb,
1701#endif
1702#ifdef CONFIG_TCP_AO
1703 .ao_lookup = tcp_v4_ao_lookup_rsk,
1704 .ao_calc_key = tcp_v4_ao_calc_key_rsk,
1705 .ao_synack_hash = tcp_v4_ao_synack_hash,
1706#endif
1707#ifdef CONFIG_SYN_COOKIES
1708 .cookie_init_seq = cookie_v4_init_sequence,
1709#endif
1710 .route_req = tcp_v4_route_req,
1711 .init_seq = tcp_v4_init_seq,
1712 .init_ts_off = tcp_v4_init_ts_off,
1713 .send_synack = tcp_v4_send_synack,
1714};
1715
1716int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
1717{
1718 /* Never answer SYNs sent to broadcast or multicast */
1719 if (skb_rtable(skb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))
1720 goto drop;
1721
1722 return tcp_conn_request(&tcp_request_sock_ops,
1723 &tcp_request_sock_ipv4_ops, sk, skb);
1724
1725drop:
1726 tcp_listendrop(sk);
1727 return 0;
1728}
1729EXPORT_SYMBOL(tcp_v4_conn_request);
1730
1731
1732/*
1733 * The three way handshake has completed - we received a valid final ACK -
1734 * now create the new socket.
1735 */
1736struct sock *tcp_v4_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
1737 struct request_sock *req,
1738 struct dst_entry *dst,
1739 struct request_sock *req_unhash,
1740 bool *own_req)
1741{
1742 struct inet_request_sock *ireq;
1743 bool found_dup_sk = false;
1744 struct inet_sock *newinet;
1745 struct tcp_sock *newtp;
1746 struct sock *newsk;
1747#ifdef CONFIG_TCP_MD5SIG
1748 const union tcp_md5_addr *addr;
1749 struct tcp_md5sig_key *key;
1750 int l3index;
1751#endif
1752 struct ip_options_rcu *inet_opt;
1753
1754 if (sk_acceptq_is_full(sk))
1755 goto exit_overflow;
1756
1757 newsk = tcp_create_openreq_child(sk, req, skb);
1758 if (!newsk)
1759 goto exit_nonewsk;
1760
1761 newsk->sk_gso_type = SKB_GSO_TCPV4;
1762 inet_sk_rx_dst_set(newsk, skb);
1763
1764 newtp = tcp_sk(newsk);
1765 newinet = inet_sk(newsk);
1766 ireq = inet_rsk(req);
1767 sk_daddr_set(newsk, ireq->ir_rmt_addr);
1768 sk_rcv_saddr_set(newsk, ireq->ir_loc_addr);
1769 newsk->sk_bound_dev_if = ireq->ir_iif;
1770 newinet->inet_saddr = ireq->ir_loc_addr;
1771 inet_opt = rcu_dereference(ireq->ireq_opt);
1772 RCU_INIT_POINTER(newinet->inet_opt, inet_opt);
1773 newinet->mc_index = inet_iif(skb);
1774 newinet->mc_ttl = ip_hdr(skb)->ttl;
1775 newinet->rcv_tos = ip_hdr(skb)->tos;
1776 inet_csk(newsk)->icsk_ext_hdr_len = 0;
1777 if (inet_opt)
1778 inet_csk(newsk)->icsk_ext_hdr_len = inet_opt->opt.optlen;
1779 atomic_set(&newinet->inet_id, get_random_u16());
1780
1781 /* Set ToS of the new socket based upon the value of incoming SYN.
1782 * ECT bits are set later in tcp_init_transfer().
1783 */
1784 if (READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_reflect_tos))
1785 newinet->tos = tcp_rsk(req)->syn_tos & ~INET_ECN_MASK;
1786
1787 if (!dst) {
1788 dst = inet_csk_route_child_sock(sk, newsk, req);
1789 if (!dst)
1790 goto put_and_exit;
1791 } else {
1792 /* syncookie case : see end of cookie_v4_check() */
1793 }
1794 sk_setup_caps(newsk, dst);
1795
1796 tcp_ca_openreq_child(newsk, dst);
1797
1798 tcp_sync_mss(newsk, dst_mtu(dst));
1799 newtp->advmss = tcp_mss_clamp(tcp_sk(sk), dst_metric_advmss(dst));
1800
1801 tcp_initialize_rcv_mss(newsk);
1802
1803#ifdef CONFIG_TCP_MD5SIG
1804 l3index = l3mdev_master_ifindex_by_index(sock_net(sk), ireq->ir_iif);
1805 /* Copy over the MD5 key from the original socket */
1806 addr = (union tcp_md5_addr *)&newinet->inet_daddr;
1807 key = tcp_md5_do_lookup(sk, l3index, addr, AF_INET);
1808 if (key && !tcp_rsk_used_ao(req)) {
1809 if (tcp_md5_key_copy(newsk, addr, AF_INET, 32, l3index, key))
1810 goto put_and_exit;
1811 sk_gso_disable(newsk);
1812 }
1813#endif
1814#ifdef CONFIG_TCP_AO
1815 if (tcp_ao_copy_all_matching(sk, newsk, req, skb, AF_INET))
1816 goto put_and_exit; /* OOM, release back memory */
1817#endif
1818
1819 if (__inet_inherit_port(sk, newsk) < 0)
1820 goto put_and_exit;
1821 *own_req = inet_ehash_nolisten(newsk, req_to_sk(req_unhash),
1822 &found_dup_sk);
1823 if (likely(*own_req)) {
1824 tcp_move_syn(newtp, req);
1825 ireq->ireq_opt = NULL;
1826 } else {
1827 newinet->inet_opt = NULL;
1828
1829 if (!req_unhash && found_dup_sk) {
1830 /* This code path should only be executed in the
1831 * syncookie case
1832 */
1833 bh_unlock_sock(newsk);
1834 sock_put(newsk);
1835 newsk = NULL;
1836 }
1837 }
1838 return newsk;
1839
1840exit_overflow:
1841 NET_INC_STATS(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
1842exit_nonewsk:
1843 dst_release(dst);
1844exit:
1845 tcp_listendrop(sk);
1846 return NULL;
1847put_and_exit:
1848 newinet->inet_opt = NULL;
1849 inet_csk_prepare_forced_close(newsk);
1850 tcp_done(newsk);
1851 goto exit;
1852}
1853EXPORT_SYMBOL(tcp_v4_syn_recv_sock);
1854
1855static struct sock *tcp_v4_cookie_check(struct sock *sk, struct sk_buff *skb)
1856{
1857#ifdef CONFIG_SYN_COOKIES
1858 const struct tcphdr *th = tcp_hdr(skb);
1859
1860 if (!th->syn)
1861 sk = cookie_v4_check(sk, skb);
1862#endif
1863 return sk;
1864}
1865
1866u16 tcp_v4_get_syncookie(struct sock *sk, struct iphdr *iph,
1867 struct tcphdr *th, u32 *cookie)
1868{
1869 u16 mss = 0;
1870#ifdef CONFIG_SYN_COOKIES
1871 mss = tcp_get_syncookie_mss(&tcp_request_sock_ops,
1872 &tcp_request_sock_ipv4_ops, sk, th);
1873 if (mss) {
1874 *cookie = __cookie_v4_init_sequence(iph, th, &mss);
1875 tcp_synq_overflow(sk);
1876 }
1877#endif
1878 return mss;
1879}
1880
1881INDIRECT_CALLABLE_DECLARE(struct dst_entry *ipv4_dst_check(struct dst_entry *,
1882 u32));
1883/* The socket must have its spinlock held when we get
1884 * here, unless it is a TCP_LISTEN socket.
1885 *
1886 * We have a potential double-lock case here, so even when
1887 * doing backlog processing we use the BH locking scheme.
1888 * This is because we cannot sleep with the original spinlock
1889 * held.
1890 */
1891int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
1892{
1893 enum skb_drop_reason reason;
1894 struct sock *rsk;
1895
1896 if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
1897 struct dst_entry *dst;
1898
1899 dst = rcu_dereference_protected(sk->sk_rx_dst,
1900 lockdep_sock_is_held(sk));
1901
1902 sock_rps_save_rxhash(sk, skb);
1903 sk_mark_napi_id(sk, skb);
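	/* Validate the cached rx dst: drop it if the packet arrived on a
	 * different interface or if the route is no longer valid.
	 */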
1904 if (dst) {
1905 if (sk->sk_rx_dst_ifindex != skb->skb_iif ||
1906 !INDIRECT_CALL_1(dst->ops->check, ipv4_dst_check,
1907 dst, 0)) {
1908 RCU_INIT_POINTER(sk->sk_rx_dst, NULL);
1909 dst_release(dst);
1910 }
1911 }
1912 tcp_rcv_established(sk, skb);
1913 return 0;
1914 }
1915
1916 if (tcp_checksum_complete(skb))
1917 goto csum_err;
1918
1919 if (sk->sk_state == TCP_LISTEN) {
1920 struct sock *nsk = tcp_v4_cookie_check(sk, skb);
1921
1922 if (!nsk)
1923 return 0;
1924 if (nsk != sk) {
1925 reason = tcp_child_process(sk, nsk, skb);
1926 if (reason) {
1927 rsk = nsk;
1928 goto reset;
1929 }
1930 return 0;
1931 }
1932 } else
1933 sock_rps_save_rxhash(sk, skb);
1934
1935 reason = tcp_rcv_state_process(sk, skb);
1936 if (reason) {
1937 rsk = sk;
1938 goto reset;
1939 }
1940 return 0;
1941
1942reset:
1943 tcp_v4_send_reset(rsk, skb);
1944discard:
1945 kfree_skb_reason(skb, reason);
1946 /* Be careful here. If this function gets more complicated and
1947 * gcc suffers from register pressure on the x86, sk (in %ebx)
1948 * might be destroyed here. This current version compiles correctly,
1949 * but you have been warned.
1950 */
1951 return 0;
1952
1953csum_err:
1954 reason = SKB_DROP_REASON_TCP_CSUM;
1955 trace_tcp_bad_csum(skb);
1956 TCP_INC_STATS(sock_net(sk), TCP_MIB_CSUMERRORS);
1957 TCP_INC_STATS(sock_net(sk), TCP_MIB_INERRS);
1958 goto discard;
1959}
1960EXPORT_SYMBOL(tcp_v4_do_rcv);
1961
1962int tcp_v4_early_demux(struct sk_buff *skb)
1963{
1964 struct net *net = dev_net(skb->dev);
1965 const struct iphdr *iph;
1966 const struct tcphdr *th;
1967 struct sock *sk;
1968
1969 if (skb->pkt_type != PACKET_HOST)
1970 return 0;
1971
1972 if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct tcphdr)))
1973 return 0;
1974
1975 iph = ip_hdr(skb);
1976 th = tcp_hdr(skb);
1977
1978 if (th->doff < sizeof(struct tcphdr) / 4)
1979 return 0;
1980
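	/* This lookup only matches sockets in the established hash (which
	 * also holds TIME_WAIT entries); listening sockets are handled
	 * later in the regular receive path.
	 */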
1981 sk = __inet_lookup_established(net, net->ipv4.tcp_death_row.hashinfo,
1982 iph->saddr, th->source,
1983 iph->daddr, ntohs(th->dest),
1984 skb->skb_iif, inet_sdif(skb));
1985 if (sk) {
1986 skb->sk = sk;
1987 skb->destructor = sock_edemux;
1988 if (sk_fullsock(sk)) {
1989 struct dst_entry *dst = rcu_dereference(sk->sk_rx_dst);
1990
1991 if (dst)
1992 dst = dst_check(dst, 0);
1993 if (dst &&
1994 sk->sk_rx_dst_ifindex == skb->skb_iif)
1995 skb_dst_set_noref(skb, dst);
1996 }
1997 }
1998 return 0;
1999}
2000
2001bool tcp_add_backlog(struct sock *sk, struct sk_buff *skb,
2002 enum skb_drop_reason *reason)
2003{
2004 u32 tail_gso_size, tail_gso_segs;
2005 struct skb_shared_info *shinfo;
2006 const struct tcphdr *th;
2007 struct tcphdr *thtail;
2008 struct sk_buff *tail;
2009 unsigned int hdrlen;
2010 bool fragstolen;
2011 u32 gso_segs;
2012 u32 gso_size;
2013 u64 limit;
2014 int delta;
2015
2016 /* In case all data was pulled from skb frags (in __pskb_pull_tail()),
2017 * we can fix skb->truesize to its real value to avoid future drops.
2018 * This is valid because skb is not yet charged to the socket.
2019 	 * It has been observed that pure SACK packets were sometimes dropped
2020 	 * (when built by drivers without the copybreak feature).
2021 */
2022 skb_condense(skb);
2023
2024 skb_dst_drop(skb);
2025
2026 if (unlikely(tcp_checksum_complete(skb))) {
2027 bh_unlock_sock(sk);
2028 trace_tcp_bad_csum(skb);
2029 *reason = SKB_DROP_REASON_TCP_CSUM;
2030 __TCP_INC_STATS(sock_net(sk), TCP_MIB_CSUMERRORS);
2031 __TCP_INC_STATS(sock_net(sk), TCP_MIB_INERRS);
2032 return true;
2033 }
2034
2035 /* Attempt coalescing to last skb in backlog, even if we are
2036 * above the limits.
2037 * This is okay because skb capacity is limited to MAX_SKB_FRAGS.
2038 */
2039 th = (const struct tcphdr *)skb->data;
2040 hdrlen = th->doff * 4;
2041
2042 tail = sk->sk_backlog.tail;
2043 if (!tail)
2044 goto no_coalesce;
2045 thtail = (struct tcphdr *)tail->data;
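	/* Coalescing requires the new segment to directly follow the tail
	 * (end_seq == seq), an identical DS field, no SYN/RST/URG on either
	 * segment, ACK set on both, matching ECE/CWR bits and byte-identical
	 * TCP option blocks.
	 */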
2046
2047 if (TCP_SKB_CB(tail)->end_seq != TCP_SKB_CB(skb)->seq ||
2048 TCP_SKB_CB(tail)->ip_dsfield != TCP_SKB_CB(skb)->ip_dsfield ||
2049 ((TCP_SKB_CB(tail)->tcp_flags |
2050 TCP_SKB_CB(skb)->tcp_flags) & (TCPHDR_SYN | TCPHDR_RST | TCPHDR_URG)) ||
2051 !((TCP_SKB_CB(tail)->tcp_flags &
2052 TCP_SKB_CB(skb)->tcp_flags) & TCPHDR_ACK) ||
2053 ((TCP_SKB_CB(tail)->tcp_flags ^
2054 TCP_SKB_CB(skb)->tcp_flags) & (TCPHDR_ECE | TCPHDR_CWR)) ||
2055#ifdef CONFIG_TLS_DEVICE
2056 tail->decrypted != skb->decrypted ||
2057#endif
2058 !mptcp_skb_can_collapse(tail, skb) ||
2059 thtail->doff != th->doff ||
2060 memcmp(thtail + 1, th + 1, hdrlen - sizeof(*th)))
2061 goto no_coalesce;
2062
2063 __skb_pull(skb, hdrlen);
2064
2065 shinfo = skb_shinfo(skb);
2066 gso_size = shinfo->gso_size ?: skb->len;
2067 gso_segs = shinfo->gso_segs ?: 1;
2068
2069 shinfo = skb_shinfo(tail);
2070 tail_gso_size = shinfo->gso_size ?: (tail->len - hdrlen);
2071 tail_gso_segs = shinfo->gso_segs ?: 1;
2072
2073 if (skb_try_coalesce(tail, skb, &fragstolen, &delta)) {
2074 TCP_SKB_CB(tail)->end_seq = TCP_SKB_CB(skb)->end_seq;
2075
2076 if (likely(!before(TCP_SKB_CB(skb)->ack_seq, TCP_SKB_CB(tail)->ack_seq))) {
2077 TCP_SKB_CB(tail)->ack_seq = TCP_SKB_CB(skb)->ack_seq;
2078 thtail->window = th->window;
2079 }
2080
2081 /* We have to update both TCP_SKB_CB(tail)->tcp_flags and
2082 * thtail->fin, so that the fast path in tcp_rcv_established()
2083 * is not entered if we append a packet with a FIN.
2084 * SYN, RST, URG are not present.
2085 * ACK is set on both packets.
2086 * PSH : we do not really care in TCP stack,
2087 * at least for 'GRO' packets.
2088 */
2089 thtail->fin |= th->fin;
2090 TCP_SKB_CB(tail)->tcp_flags |= TCP_SKB_CB(skb)->tcp_flags;
2091
2092 if (TCP_SKB_CB(skb)->has_rxtstamp) {
2093 TCP_SKB_CB(tail)->has_rxtstamp = true;
2094 tail->tstamp = skb->tstamp;
2095 skb_hwtstamps(tail)->hwtstamp = skb_hwtstamps(skb)->hwtstamp;
2096 }
2097
2098 /* Not as strict as GRO. We only need to carry mss max value */
2099 shinfo->gso_size = max(gso_size, tail_gso_size);
2100 shinfo->gso_segs = min_t(u32, gso_segs + tail_gso_segs, 0xFFFF);
2101
2102 sk->sk_backlog.len += delta;
2103 __NET_INC_STATS(sock_net(sk),
2104 LINUX_MIB_TCPBACKLOGCOALESCE);
2105 kfree_skb_partial(skb, fragstolen);
2106 return false;
2107 }
2108 __skb_push(skb, hdrlen);
2109
2110no_coalesce:
2111 /* sk->sk_backlog.len is reset only at the end of __release_sock().
2112 * Both sk->sk_backlog.len and sk->sk_rmem_alloc could reach
2113 * sk_rcvbuf in normal conditions.
2114 */
2115 limit = ((u64)READ_ONCE(sk->sk_rcvbuf)) << 1;
2116
2117 limit += ((u32)READ_ONCE(sk->sk_sndbuf)) >> 1;
2118
2119 /* Only socket owner can try to collapse/prune rx queues
2120 * to reduce memory overhead, so add a little headroom here.
2121 	 * Only a few socket backlogs are likely to be non-empty at the same time.
2122 */
2123 limit += 64 * 1024;
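	/* sk_add_backlog() takes an unsigned int limit, so clamp the u64
	 * value to UINT_MAX to avoid truncation.
	 */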
2124
2125 limit = min_t(u64, limit, UINT_MAX);
2126
2127 if (unlikely(sk_add_backlog(sk, skb, limit))) {
2128 bh_unlock_sock(sk);
2129 *reason = SKB_DROP_REASON_SOCKET_BACKLOG;
2130 __NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPBACKLOGDROP);
2131 return true;
2132 }
2133 return false;
2134}
2135EXPORT_SYMBOL(tcp_add_backlog);
2136
2137int tcp_filter(struct sock *sk, struct sk_buff *skb)
2138{
2139 struct tcphdr *th = (struct tcphdr *)skb->data;
2140
2141 return sk_filter_trim_cap(sk, skb, th->doff * 4);
2142}
2143EXPORT_SYMBOL(tcp_filter);
2144
2145static void tcp_v4_restore_cb(struct sk_buff *skb)
2146{
2147 memmove(IPCB(skb), &TCP_SKB_CB(skb)->header.h4,
2148 sizeof(struct inet_skb_parm));
2149}
2150
2151static void tcp_v4_fill_cb(struct sk_buff *skb, const struct iphdr *iph,
2152 const struct tcphdr *th)
2153{
2154 	/* This is tricky: we move IPCB to its correct location inside TCP_SKB_CB().
2155 	 * barrier() makes sure the compiler won't play fool^Waliasing games.
2156 */
2157 memmove(&TCP_SKB_CB(skb)->header.h4, IPCB(skb),
2158 sizeof(struct inet_skb_parm));
2159 barrier();
2160
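	/* SYN and FIN each consume one unit of sequence space, hence the
	 * th->syn + th->fin terms in end_seq below.
	 */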
2161 TCP_SKB_CB(skb)->seq = ntohl(th->seq);
2162 TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
2163 skb->len - th->doff * 4);
2164 TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
2165 TCP_SKB_CB(skb)->tcp_flags = tcp_flag_byte(th);
2166 TCP_SKB_CB(skb)->tcp_tw_isn = 0;
2167 TCP_SKB_CB(skb)->ip_dsfield = ipv4_get_dsfield(iph);
2168 TCP_SKB_CB(skb)->sacked = 0;
2169 TCP_SKB_CB(skb)->has_rxtstamp =
2170 skb->tstamp || skb_hwtstamps(skb)->hwtstamp;
2171}
2172
2173/*
2174 * From tcp_input.c
2175 */
2176
2177int tcp_v4_rcv(struct sk_buff *skb)
2178{
2179 struct net *net = dev_net(skb->dev);
2180 enum skb_drop_reason drop_reason;
2181 int sdif = inet_sdif(skb);
2182 int dif = inet_iif(skb);
2183 const struct iphdr *iph;
2184 const struct tcphdr *th;
2185 bool refcounted;
2186 struct sock *sk;
2187 int ret;
2188
2189 drop_reason = SKB_DROP_REASON_NOT_SPECIFIED;
2190 if (skb->pkt_type != PACKET_HOST)
2191 goto discard_it;
2192
2193 /* Count it even if it's bad */
2194 __TCP_INC_STATS(net, TCP_MIB_INSEGS);
2195
2196 if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
2197 goto discard_it;
2198
2199 th = (const struct tcphdr *)skb->data;
2200
2201 if (unlikely(th->doff < sizeof(struct tcphdr) / 4)) {
2202 drop_reason = SKB_DROP_REASON_PKT_TOO_SMALL;
2203 goto bad_packet;
2204 }
2205 if (!pskb_may_pull(skb, th->doff * 4))
2206 goto discard_it;
2207
2208 /* An explanation is required here, I think.
2209 * Packet length and doff are validated by header prediction,
2210 	 * provided the th->doff == 0 case has been eliminated.
2211 * So, we defer the checks. */
2212
2213 if (skb_checksum_init(skb, IPPROTO_TCP, inet_compute_pseudo))
2214 goto csum_error;
2215
2216 th = (const struct tcphdr *)skb->data;
2217 iph = ip_hdr(skb);
2218lookup:
2219 sk = __inet_lookup_skb(net->ipv4.tcp_death_row.hashinfo,
2220 skb, __tcp_hdrlen(th), th->source,
2221 th->dest, sdif, &refcounted);
2222 if (!sk)
2223 goto no_tcp_socket;
2224
2225process:
2226 if (sk->sk_state == TCP_TIME_WAIT)
2227 goto do_time_wait;
2228
2229 if (sk->sk_state == TCP_NEW_SYN_RECV) {
2230 struct request_sock *req = inet_reqsk(sk);
2231 bool req_stolen = false;
2232 struct sock *nsk;
2233
2234 sk = req->rsk_listener;
2235 if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
2236 drop_reason = SKB_DROP_REASON_XFRM_POLICY;
2237 else
2238 drop_reason = tcp_inbound_hash(sk, req, skb,
2239 &iph->saddr, &iph->daddr,
2240 AF_INET, dif, sdif);
2241 if (unlikely(drop_reason)) {
2242 sk_drops_add(sk, skb);
2243 reqsk_put(req);
2244 goto discard_it;
2245 }
2246 if (tcp_checksum_complete(skb)) {
2247 reqsk_put(req);
2248 goto csum_error;
2249 }
2250 if (unlikely(sk->sk_state != TCP_LISTEN)) {
2251 nsk = reuseport_migrate_sock(sk, req_to_sk(req), skb);
2252 if (!nsk) {
2253 inet_csk_reqsk_queue_drop_and_put(sk, req);
2254 goto lookup;
2255 }
2256 sk = nsk;
2257 /* reuseport_migrate_sock() has already held one sk_refcnt
2258 * before returning.
2259 */
2260 } else {
2261 /* We own a reference on the listener, increase it again
2262 * as we might lose it too soon.
2263 */
2264 sock_hold(sk);
2265 }
2266 refcounted = true;
2267 nsk = NULL;
2268 if (!tcp_filter(sk, skb)) {
2269 th = (const struct tcphdr *)skb->data;
2270 iph = ip_hdr(skb);
2271 tcp_v4_fill_cb(skb, iph, th);
2272 nsk = tcp_check_req(sk, skb, req, false, &req_stolen);
2273 } else {
2274 drop_reason = SKB_DROP_REASON_SOCKET_FILTER;
2275 }
2276 if (!nsk) {
2277 reqsk_put(req);
2278 if (req_stolen) {
2279 /* Another cpu got exclusive access to req
2280 * and created a full blown socket.
2281 * Try to feed this packet to this socket
2282 * instead of discarding it.
2283 */
2284 tcp_v4_restore_cb(skb);
2285 sock_put(sk);
2286 goto lookup;
2287 }
2288 goto discard_and_relse;
2289 }
2290 nf_reset_ct(skb);
2291 if (nsk == sk) {
2292 reqsk_put(req);
2293 tcp_v4_restore_cb(skb);
2294 } else {
2295 drop_reason = tcp_child_process(sk, nsk, skb);
2296 if (drop_reason) {
2297 tcp_v4_send_reset(nsk, skb);
2298 goto discard_and_relse;
2299 }
2300 sock_put(sk);
2301 return 0;
2302 }
2303 }
2304
2305 if (static_branch_unlikely(&ip4_min_ttl)) {
2306 /* min_ttl can be changed concurrently from do_ip_setsockopt() */
2307 if (unlikely(iph->ttl < READ_ONCE(inet_sk(sk)->min_ttl))) {
2308 __NET_INC_STATS(net, LINUX_MIB_TCPMINTTLDROP);
2309 drop_reason = SKB_DROP_REASON_TCP_MINTTL;
2310 goto discard_and_relse;
2311 }
2312 }
2313
2314 if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb)) {
2315 drop_reason = SKB_DROP_REASON_XFRM_POLICY;
2316 goto discard_and_relse;
2317 }
2318
2319 drop_reason = tcp_inbound_hash(sk, NULL, skb, &iph->saddr, &iph->daddr,
2320 AF_INET, dif, sdif);
2321 if (drop_reason)
2322 goto discard_and_relse;
2323
2324 nf_reset_ct(skb);
2325
2326 if (tcp_filter(sk, skb)) {
2327 drop_reason = SKB_DROP_REASON_SOCKET_FILTER;
2328 goto discard_and_relse;
2329 }
2330 th = (const struct tcphdr *)skb->data;
2331 iph = ip_hdr(skb);
2332 tcp_v4_fill_cb(skb, iph, th);
2333
2334 skb->dev = NULL;
2335
2336 if (sk->sk_state == TCP_LISTEN) {
2337 ret = tcp_v4_do_rcv(sk, skb);
2338 goto put_and_return;
2339 }
2340
2341 sk_incoming_cpu_update(sk);
2342
2343 bh_lock_sock_nested(sk);
2344 tcp_segs_in(tcp_sk(sk), skb);
2345 ret = 0;
2346 if (!sock_owned_by_user(sk)) {
2347 ret = tcp_v4_do_rcv(sk, skb);
2348 } else {
2349 if (tcp_add_backlog(sk, skb, &drop_reason))
2350 goto discard_and_relse;
2351 }
2352 bh_unlock_sock(sk);
2353
2354put_and_return:
2355 if (refcounted)
2356 sock_put(sk);
2357
2358 return ret;
2359
2360no_tcp_socket:
2361 drop_reason = SKB_DROP_REASON_NO_SOCKET;
2362 if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
2363 goto discard_it;
2364
2365 tcp_v4_fill_cb(skb, iph, th);
2366
2367 if (tcp_checksum_complete(skb)) {
2368csum_error:
2369 drop_reason = SKB_DROP_REASON_TCP_CSUM;
2370 trace_tcp_bad_csum(skb);
2371 __TCP_INC_STATS(net, TCP_MIB_CSUMERRORS);
2372bad_packet:
2373 __TCP_INC_STATS(net, TCP_MIB_INERRS);
2374 } else {
2375 tcp_v4_send_reset(NULL, skb);
2376 }
2377
2378discard_it:
2379 SKB_DR_OR(drop_reason, NOT_SPECIFIED);
2380 /* Discard frame. */
2381 kfree_skb_reason(skb, drop_reason);
2382 return 0;
2383
2384discard_and_relse:
2385 sk_drops_add(sk, skb);
2386 if (refcounted)
2387 sock_put(sk);
2388 goto discard_it;
2389
2390do_time_wait:
2391 if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) {
2392 drop_reason = SKB_DROP_REASON_XFRM_POLICY;
2393 inet_twsk_put(inet_twsk(sk));
2394 goto discard_it;
2395 }
2396
2397 tcp_v4_fill_cb(skb, iph, th);
2398
2399 if (tcp_checksum_complete(skb)) {
2400 inet_twsk_put(inet_twsk(sk));
2401 goto csum_error;
2402 }
2403 switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
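	/* TCP_TW_SYN: a new SYN arrived that is acceptable for reopening the
	 * connection; hand it to a matching listener and retire the
	 * TIME_WAIT socket.
	 */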
2404 case TCP_TW_SYN: {
2405 struct sock *sk2 = inet_lookup_listener(net,
2406 net->ipv4.tcp_death_row.hashinfo,
2407 skb, __tcp_hdrlen(th),
2408 iph->saddr, th->source,
2409 iph->daddr, th->dest,
2410 inet_iif(skb),
2411 sdif);
2412 if (sk2) {
2413 inet_twsk_deschedule_put(inet_twsk(sk));
2414 sk = sk2;
2415 tcp_v4_restore_cb(skb);
2416 refcounted = false;
2417 goto process;
2418 }
2419 }
2420 /* to ACK */
2421 fallthrough;
2422 case TCP_TW_ACK:
2423 tcp_v4_timewait_ack(sk, skb);
2424 break;
2425 case TCP_TW_RST:
2426 tcp_v4_send_reset(sk, skb);
2427 inet_twsk_deschedule_put(inet_twsk(sk));
2428 goto discard_it;
2429 case TCP_TW_SUCCESS:;
2430 }
2431 goto discard_it;
2432}
2433
2434static struct timewait_sock_ops tcp_timewait_sock_ops = {
2435 .twsk_obj_size = sizeof(struct tcp_timewait_sock),
2436 .twsk_unique = tcp_twsk_unique,
2437 .twsk_destructor= tcp_twsk_destructor,
2438};
2439
2440void inet_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
2441{
2442 struct dst_entry *dst = skb_dst(skb);
2443
2444 if (dst && dst_hold_safe(dst)) {
2445 rcu_assign_pointer(sk->sk_rx_dst, dst);
2446 sk->sk_rx_dst_ifindex = skb->skb_iif;
2447 }
2448}
2449EXPORT_SYMBOL(inet_sk_rx_dst_set);
2450
2451const struct inet_connection_sock_af_ops ipv4_specific = {
2452 .queue_xmit = ip_queue_xmit,
2453 .send_check = tcp_v4_send_check,
2454 .rebuild_header = inet_sk_rebuild_header,
2455 .sk_rx_dst_set = inet_sk_rx_dst_set,
2456 .conn_request = tcp_v4_conn_request,
2457 .syn_recv_sock = tcp_v4_syn_recv_sock,
2458 .net_header_len = sizeof(struct iphdr),
2459 .setsockopt = ip_setsockopt,
2460 .getsockopt = ip_getsockopt,
2461 .addr2sockaddr = inet_csk_addr2sockaddr,
2462 .sockaddr_len = sizeof(struct sockaddr_in),
2463 .mtu_reduced = tcp_v4_mtu_reduced,
2464};
2465EXPORT_SYMBOL(ipv4_specific);
2466
2467#if defined(CONFIG_TCP_MD5SIG) || defined(CONFIG_TCP_AO)
2468static const struct tcp_sock_af_ops tcp_sock_ipv4_specific = {
2469#ifdef CONFIG_TCP_MD5SIG
2470 .md5_lookup = tcp_v4_md5_lookup,
2471 .calc_md5_hash = tcp_v4_md5_hash_skb,
2472 .md5_parse = tcp_v4_parse_md5_keys,
2473#endif
2474#ifdef CONFIG_TCP_AO
2475 .ao_lookup = tcp_v4_ao_lookup,
2476 .calc_ao_hash = tcp_v4_ao_hash_skb,
2477 .ao_parse = tcp_v4_parse_ao,
2478 .ao_calc_key_sk = tcp_v4_ao_calc_key_sk,
2479#endif
2480};
2481#endif
2482
2483/* NOTE: A lot of things are set to zero explicitly by the call to
2484 *	 sk_alloc(), so they need not be done here.
2485 */
2486static int tcp_v4_init_sock(struct sock *sk)
2487{
2488 struct inet_connection_sock *icsk = inet_csk(sk);
2489
2490 tcp_init_sock(sk);
2491
2492 icsk->icsk_af_ops = &ipv4_specific;
2493
2494#if defined(CONFIG_TCP_MD5SIG) || defined(CONFIG_TCP_AO)
2495 tcp_sk(sk)->af_specific = &tcp_sock_ipv4_specific;
2496#endif
2497
2498 return 0;
2499}
2500
2501#ifdef CONFIG_TCP_MD5SIG
2502static void tcp_md5sig_info_free_rcu(struct rcu_head *head)
2503{
2504 struct tcp_md5sig_info *md5sig;
2505
2506 md5sig = container_of(head, struct tcp_md5sig_info, rcu);
2507 kfree(md5sig);
2508 static_branch_slow_dec_deferred(&tcp_md5_needed);
2509 tcp_md5_release_sigpool();
2510}
2511#endif
2512
2513void tcp_v4_destroy_sock(struct sock *sk)
2514{
2515 struct tcp_sock *tp = tcp_sk(sk);
2516
2517 trace_tcp_destroy_sock(sk);
2518
2519 tcp_clear_xmit_timers(sk);
2520
2521 tcp_cleanup_congestion_control(sk);
2522
2523 tcp_cleanup_ulp(sk);
2524
2525 	/* Clean up the write buffer. */
2526 tcp_write_queue_purge(sk);
2527
2528 /* Check if we want to disable active TFO */
2529 tcp_fastopen_active_disable_ofo_check(sk);
2530
2531 /* Cleans up our, hopefully empty, out_of_order_queue. */
2532 skb_rbtree_purge(&tp->out_of_order_queue);
2533
2534#ifdef CONFIG_TCP_MD5SIG
2535 /* Clean up the MD5 key list, if any */
2536 if (tp->md5sig_info) {
2537 struct tcp_md5sig_info *md5sig;
2538
2539 md5sig = rcu_dereference_protected(tp->md5sig_info, 1);
2540 tcp_clear_md5_list(sk);
2541 call_rcu(&md5sig->rcu, tcp_md5sig_info_free_rcu);
2542 rcu_assign_pointer(tp->md5sig_info, NULL);
2543 }
2544#endif
2545 tcp_ao_destroy_sock(sk, false);
2546
2547 /* Clean up a referenced TCP bind bucket. */
2548 if (inet_csk(sk)->icsk_bind_hash)
2549 inet_put_port(sk);
2550
2551 BUG_ON(rcu_access_pointer(tp->fastopen_rsk));
2552
2553 /* If socket is aborted during connect operation */
2554 tcp_free_fastopen_req(tp);
2555 tcp_fastopen_destroy_cipher(sk);
2556 tcp_saved_syn_free(tp);
2557
2558 sk_sockets_allocated_dec(sk);
2559}
2560EXPORT_SYMBOL(tcp_v4_destroy_sock);
2561
2562#ifdef CONFIG_PROC_FS
2563/* Proc filesystem TCP sock list dumping. */
2564
2565static unsigned short seq_file_family(const struct seq_file *seq);
2566
2567static bool seq_sk_match(struct seq_file *seq, const struct sock *sk)
2568{
2569 unsigned short family = seq_file_family(seq);
2570
2571 /* AF_UNSPEC is used as a match all */
2572 return ((family == AF_UNSPEC || family == sk->sk_family) &&
2573 net_eq(sock_net(sk), seq_file_net(seq)));
2574}
2575
2576/* Find a non-empty bucket (starting from st->bucket)
2577 * and return the first sk from it.
2578 */
2579static void *listening_get_first(struct seq_file *seq)
2580{
2581 struct inet_hashinfo *hinfo = seq_file_net(seq)->ipv4.tcp_death_row.hashinfo;
2582 struct tcp_iter_state *st = seq->private;
2583
2584 st->offset = 0;
2585 for (; st->bucket <= hinfo->lhash2_mask; st->bucket++) {
2586 struct inet_listen_hashbucket *ilb2;
2587 struct hlist_nulls_node *node;
2588 struct sock *sk;
2589
2590 ilb2 = &hinfo->lhash2[st->bucket];
2591 if (hlist_nulls_empty(&ilb2->nulls_head))
2592 continue;
2593
2594 spin_lock(&ilb2->lock);
2595 sk_nulls_for_each(sk, node, &ilb2->nulls_head) {
2596 if (seq_sk_match(seq, sk))
2597 return sk;
2598 }
2599 spin_unlock(&ilb2->lock);
2600 }
2601
2602 return NULL;
2603}
2604
2605/* Find the next sk of "cur" within the same bucket (i.e. st->bucket).
2606 * If "cur" is the last one in the st->bucket,
2607 * call listening_get_first() to return the first sk of the next
2608 * non-empty bucket.
2609 */
2610static void *listening_get_next(struct seq_file *seq, void *cur)
2611{
2612 struct tcp_iter_state *st = seq->private;
2613 struct inet_listen_hashbucket *ilb2;
2614 struct hlist_nulls_node *node;
2615 struct inet_hashinfo *hinfo;
2616 struct sock *sk = cur;
2617
2618 ++st->num;
2619 ++st->offset;
2620
2621 sk = sk_nulls_next(sk);
2622 sk_nulls_for_each_from(sk, node) {
2623 if (seq_sk_match(seq, sk))
2624 return sk;
2625 }
2626
2627 hinfo = seq_file_net(seq)->ipv4.tcp_death_row.hashinfo;
2628 ilb2 = &hinfo->lhash2[st->bucket];
2629 spin_unlock(&ilb2->lock);
2630 ++st->bucket;
2631 return listening_get_first(seq);
2632}
2633
2634static void *listening_get_idx(struct seq_file *seq, loff_t *pos)
2635{
2636 struct tcp_iter_state *st = seq->private;
2637 void *rc;
2638
2639 st->bucket = 0;
2640 st->offset = 0;
2641 rc = listening_get_first(seq);
2642
2643 while (rc && *pos) {
2644 rc = listening_get_next(seq, rc);
2645 --*pos;
2646 }
2647 return rc;
2648}
2649
2650static inline bool empty_bucket(struct inet_hashinfo *hinfo,
2651 const struct tcp_iter_state *st)
2652{
2653 return hlist_nulls_empty(&hinfo->ehash[st->bucket].chain);
2654}
2655
2656/*
2657 * Get first established socket starting from bucket given in st->bucket.
2658 * If st->bucket is zero, the very first socket in the hash is returned.
2659 */
2660static void *established_get_first(struct seq_file *seq)
2661{
2662 struct inet_hashinfo *hinfo = seq_file_net(seq)->ipv4.tcp_death_row.hashinfo;
2663 struct tcp_iter_state *st = seq->private;
2664
2665 st->offset = 0;
2666 for (; st->bucket <= hinfo->ehash_mask; ++st->bucket) {
2667 struct sock *sk;
2668 struct hlist_nulls_node *node;
2669 spinlock_t *lock = inet_ehash_lockp(hinfo, st->bucket);
2670
2671 cond_resched();
2672
2673 /* Lockless fast path for the common case of empty buckets */
2674 if (empty_bucket(hinfo, st))
2675 continue;
2676
2677 spin_lock_bh(lock);
2678 sk_nulls_for_each(sk, node, &hinfo->ehash[st->bucket].chain) {
2679 if (seq_sk_match(seq, sk))
2680 return sk;
2681 }
2682 spin_unlock_bh(lock);
2683 }
2684
2685 return NULL;
2686}
2687
2688static void *established_get_next(struct seq_file *seq, void *cur)
2689{
2690 struct inet_hashinfo *hinfo = seq_file_net(seq)->ipv4.tcp_death_row.hashinfo;
2691 struct tcp_iter_state *st = seq->private;
2692 struct hlist_nulls_node *node;
2693 struct sock *sk = cur;
2694
2695 ++st->num;
2696 ++st->offset;
2697
2698 sk = sk_nulls_next(sk);
2699
2700 sk_nulls_for_each_from(sk, node) {
2701 if (seq_sk_match(seq, sk))
2702 return sk;
2703 }
2704
2705 spin_unlock_bh(inet_ehash_lockp(hinfo, st->bucket));
2706 ++st->bucket;
2707 return established_get_first(seq);
2708}
2709
2710static void *established_get_idx(struct seq_file *seq, loff_t pos)
2711{
2712 struct tcp_iter_state *st = seq->private;
2713 void *rc;
2714
2715 st->bucket = 0;
2716 rc = established_get_first(seq);
2717
2718 while (rc && pos) {
2719 rc = established_get_next(seq, rc);
2720 --pos;
2721 }
2722 return rc;
2723}
2724
2725static void *tcp_get_idx(struct seq_file *seq, loff_t pos)
2726{
2727 void *rc;
2728 struct tcp_iter_state *st = seq->private;
2729
2730 st->state = TCP_SEQ_STATE_LISTENING;
2731 rc = listening_get_idx(seq, &pos);
2732
2733 if (!rc) {
2734 st->state = TCP_SEQ_STATE_ESTABLISHED;
2735 rc = established_get_idx(seq, pos);
2736 }
2737
2738 return rc;
2739}
2740
2741static void *tcp_seek_last_pos(struct seq_file *seq)
2742{
2743 struct inet_hashinfo *hinfo = seq_file_net(seq)->ipv4.tcp_death_row.hashinfo;
2744 struct tcp_iter_state *st = seq->private;
2745 int bucket = st->bucket;
2746 int offset = st->offset;
2747 int orig_num = st->num;
2748 void *rc = NULL;
2749
2750 switch (st->state) {
2751 case TCP_SEQ_STATE_LISTENING:
2752 if (st->bucket > hinfo->lhash2_mask)
2753 break;
2754 rc = listening_get_first(seq);
2755 while (offset-- && rc && bucket == st->bucket)
2756 rc = listening_get_next(seq, rc);
2757 if (rc)
2758 break;
2759 st->bucket = 0;
2760 st->state = TCP_SEQ_STATE_ESTABLISHED;
2761 fallthrough;
2762 case TCP_SEQ_STATE_ESTABLISHED:
2763 if (st->bucket > hinfo->ehash_mask)
2764 break;
2765 rc = established_get_first(seq);
2766 while (offset-- && rc && bucket == st->bucket)
2767 rc = established_get_next(seq, rc);
2768 }
2769
2770 st->num = orig_num;
2771
2772 return rc;
2773}
2774
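/* Sequential reads of /proc/net/tcp resume from st->last_pos via
 * tcp_seek_last_pos() instead of rescanning every bucket from the start.
 */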
2775void *tcp_seq_start(struct seq_file *seq, loff_t *pos)
2776{
2777 struct tcp_iter_state *st = seq->private;
2778 void *rc;
2779
2780 if (*pos && *pos == st->last_pos) {
2781 rc = tcp_seek_last_pos(seq);
2782 if (rc)
2783 goto out;
2784 }
2785
2786 st->state = TCP_SEQ_STATE_LISTENING;
2787 st->num = 0;
2788 st->bucket = 0;
2789 st->offset = 0;
2790 rc = *pos ? tcp_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
2791
2792out:
2793 st->last_pos = *pos;
2794 return rc;
2795}
2796EXPORT_SYMBOL(tcp_seq_start);
2797
2798void *tcp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2799{
2800 struct tcp_iter_state *st = seq->private;
2801 void *rc = NULL;
2802
2803 if (v == SEQ_START_TOKEN) {
2804 rc = tcp_get_idx(seq, 0);
2805 goto out;
2806 }
2807
2808 switch (st->state) {
2809 case TCP_SEQ_STATE_LISTENING:
2810 rc = listening_get_next(seq, v);
2811 if (!rc) {
2812 st->state = TCP_SEQ_STATE_ESTABLISHED;
2813 st->bucket = 0;
2814 st->offset = 0;
2815 rc = established_get_first(seq);
2816 }
2817 break;
2818 case TCP_SEQ_STATE_ESTABLISHED:
2819 rc = established_get_next(seq, v);
2820 break;
2821 }
2822out:
2823 ++*pos;
2824 st->last_pos = *pos;
2825 return rc;
2826}
2827EXPORT_SYMBOL(tcp_seq_next);
2828
2829void tcp_seq_stop(struct seq_file *seq, void *v)
2830{
2831 struct inet_hashinfo *hinfo = seq_file_net(seq)->ipv4.tcp_death_row.hashinfo;
2832 struct tcp_iter_state *st = seq->private;
2833
2834 switch (st->state) {
2835 case TCP_SEQ_STATE_LISTENING:
2836 if (v != SEQ_START_TOKEN)
2837 spin_unlock(&hinfo->lhash2[st->bucket].lock);
2838 break;
2839 case TCP_SEQ_STATE_ESTABLISHED:
2840 if (v)
2841 spin_unlock_bh(inet_ehash_lockp(hinfo, st->bucket));
2842 break;
2843 }
2844}
2845EXPORT_SYMBOL(tcp_seq_stop);
2846
2847static void get_openreq4(const struct request_sock *req,
2848 struct seq_file *f, int i)
2849{
2850 const struct inet_request_sock *ireq = inet_rsk(req);
2851 long delta = req->rsk_timer.expires - jiffies;
2852
2853 seq_printf(f, "%4d: %08X:%04X %08X:%04X"
2854 " %02X %08X:%08X %02X:%08lX %08X %5u %8d %u %d %pK",
2855 i,
2856 ireq->ir_loc_addr,
2857 ireq->ir_num,
2858 ireq->ir_rmt_addr,
2859 ntohs(ireq->ir_rmt_port),
2860 TCP_SYN_RECV,
2861 0, 0, /* could print option size, but that is af dependent. */
2862 1, /* timers active (only the expire timer) */
2863 jiffies_delta_to_clock_t(delta),
2864 req->num_timeout,
2865 from_kuid_munged(seq_user_ns(f),
2866 sock_i_uid(req->rsk_listener)),
2867 0, /* non standard timer */
2868 0, /* open_requests have no inode */
2869 0,
2870 req);
2871}
2872
2873static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i)
2874{
2875 int timer_active;
2876 unsigned long timer_expires;
2877 const struct tcp_sock *tp = tcp_sk(sk);
2878 const struct inet_connection_sock *icsk = inet_csk(sk);
2879 const struct inet_sock *inet = inet_sk(sk);
2880 const struct fastopen_queue *fastopenq = &icsk->icsk_accept_queue.fastopenq;
2881 __be32 dest = inet->inet_daddr;
2882 __be32 src = inet->inet_rcv_saddr;
2883 __u16 destp = ntohs(inet->inet_dport);
2884 __u16 srcp = ntohs(inet->inet_sport);
2885 int rx_queue;
2886 int state;
2887
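	/* Timer codes reported in the "tr" column of /proc/net/tcp:
	 * 1 = retransmit/loss-probe timer, 2 = keepalive timer,
	 * 4 = zero window probe timer, 0 = no timer pending.
	 */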
2888 if (icsk->icsk_pending == ICSK_TIME_RETRANS ||
2889 icsk->icsk_pending == ICSK_TIME_REO_TIMEOUT ||
2890 icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) {
2891 timer_active = 1;
2892 timer_expires = icsk->icsk_timeout;
2893 } else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
2894 timer_active = 4;
2895 timer_expires = icsk->icsk_timeout;
2896 } else if (timer_pending(&sk->sk_timer)) {
2897 timer_active = 2;
2898 timer_expires = sk->sk_timer.expires;
2899 } else {
2900 timer_active = 0;
2901 timer_expires = jiffies;
2902 }
2903
2904 state = inet_sk_state_load(sk);
2905 if (state == TCP_LISTEN)
2906 rx_queue = READ_ONCE(sk->sk_ack_backlog);
2907 else
2908 /* Because we don't lock the socket,
2909 * we might find a transient negative value.
2910 */
2911 rx_queue = max_t(int, READ_ONCE(tp->rcv_nxt) -
2912 READ_ONCE(tp->copied_seq), 0);
2913
2914 seq_printf(f, "%4d: %08X:%04X %08X:%04X %02X %08X:%08X %02X:%08lX "
2915 "%08X %5u %8d %lu %d %pK %lu %lu %u %u %d",
2916 i, src, srcp, dest, destp, state,
2917 READ_ONCE(tp->write_seq) - tp->snd_una,
2918 rx_queue,
2919 timer_active,
2920 jiffies_delta_to_clock_t(timer_expires - jiffies),
2921 icsk->icsk_retransmits,
2922 from_kuid_munged(seq_user_ns(f), sock_i_uid(sk)),
2923 icsk->icsk_probes_out,
2924 sock_i_ino(sk),
2925 refcount_read(&sk->sk_refcnt), sk,
2926 jiffies_to_clock_t(icsk->icsk_rto),
2927 jiffies_to_clock_t(icsk->icsk_ack.ato),
2928 (icsk->icsk_ack.quick << 1) | inet_csk_in_pingpong_mode(sk),
2929 tcp_snd_cwnd(tp),
2930 state == TCP_LISTEN ?
2931 fastopenq->max_qlen :
2932 (tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh));
2933}
2934
2935static void get_timewait4_sock(const struct inet_timewait_sock *tw,
2936 struct seq_file *f, int i)
2937{
2938 long delta = tw->tw_timer.expires - jiffies;
2939 __be32 dest, src;
2940 __u16 destp, srcp;
2941
2942 dest = tw->tw_daddr;
2943 src = tw->tw_rcv_saddr;
2944 destp = ntohs(tw->tw_dport);
2945 srcp = ntohs(tw->tw_sport);
2946
2947 seq_printf(f, "%4d: %08X:%04X %08X:%04X"
2948 " %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK",
2949 i, src, srcp, dest, destp, tw->tw_substate, 0, 0,
2950 3, jiffies_delta_to_clock_t(delta), 0, 0, 0, 0,
2951 refcount_read(&tw->tw_refcnt), tw);
2952}
2953
2954#define TMPSZ 150
2955
2956static int tcp4_seq_show(struct seq_file *seq, void *v)
2957{
2958 struct tcp_iter_state *st;
2959 struct sock *sk = v;
2960
2961 seq_setwidth(seq, TMPSZ - 1);
2962 if (v == SEQ_START_TOKEN) {
2963 seq_puts(seq, " sl local_address rem_address st tx_queue "
2964 "rx_queue tr tm->when retrnsmt uid timeout "
2965 "inode");
2966 goto out;
2967 }
2968 st = seq->private;
2969
2970 if (sk->sk_state == TCP_TIME_WAIT)
2971 get_timewait4_sock(v, seq, st->num);
2972 else if (sk->sk_state == TCP_NEW_SYN_RECV)
2973 get_openreq4(v, seq, st->num);
2974 else
2975 get_tcp4_sock(v, seq, st->num);
2976out:
2977 seq_pad(seq, '\n');
2978 return 0;
2979}
2980
2981#ifdef CONFIG_BPF_SYSCALL
2982struct bpf_tcp_iter_state {
2983 struct tcp_iter_state state;
2984 unsigned int cur_sk;
2985 unsigned int end_sk;
2986 unsigned int max_sk;
2987 struct sock **batch;
2988 bool st_bucket_done;
2989};
2990
2991struct bpf_iter__tcp {
2992 __bpf_md_ptr(struct bpf_iter_meta *, meta);
2993 __bpf_md_ptr(struct sock_common *, sk_common);
2994 uid_t uid __aligned(8);
2995};
2996
2997static int tcp_prog_seq_show(struct bpf_prog *prog, struct bpf_iter_meta *meta,
2998 struct sock_common *sk_common, uid_t uid)
2999{
3000 struct bpf_iter__tcp ctx;
3001
3002 meta->seq_num--; /* skip SEQ_START_TOKEN */
3003 ctx.meta = meta;
3004 ctx.sk_common = sk_common;
3005 ctx.uid = uid;
3006 return bpf_iter_run_prog(prog, &ctx);
3007}
3008
3009static void bpf_iter_tcp_put_batch(struct bpf_tcp_iter_state *iter)
3010{
3011 while (iter->cur_sk < iter->end_sk)
3012 sock_gen_put(iter->batch[iter->cur_sk++]);
3013}
3014
3015static int bpf_iter_tcp_realloc_batch(struct bpf_tcp_iter_state *iter,
3016 unsigned int new_batch_sz)
3017{
3018 struct sock **new_batch;
3019
3020 new_batch = kvmalloc(sizeof(*new_batch) * new_batch_sz,
3021 GFP_USER | __GFP_NOWARN);
3022 if (!new_batch)
3023 return -ENOMEM;
3024
3025 bpf_iter_tcp_put_batch(iter);
3026 kvfree(iter->batch);
3027 iter->batch = new_batch;
3028 iter->max_sk = new_batch_sz;
3029
3030 return 0;
3031}
3032
3033static unsigned int bpf_iter_tcp_listening_batch(struct seq_file *seq,
3034 struct sock *start_sk)
3035{
3036 struct inet_hashinfo *hinfo = seq_file_net(seq)->ipv4.tcp_death_row.hashinfo;
3037 struct bpf_tcp_iter_state *iter = seq->private;
3038 struct tcp_iter_state *st = &iter->state;
3039 struct hlist_nulls_node *node;
3040 unsigned int expected = 1;
3041 struct sock *sk;
3042
3043 sock_hold(start_sk);
3044 iter->batch[iter->end_sk++] = start_sk;
3045
3046 sk = sk_nulls_next(start_sk);
3047 sk_nulls_for_each_from(sk, node) {
3048 if (seq_sk_match(seq, sk)) {
3049 if (iter->end_sk < iter->max_sk) {
3050 sock_hold(sk);
3051 iter->batch[iter->end_sk++] = sk;
3052 }
3053 expected++;
3054 }
3055 }
3056 spin_unlock(&hinfo->lhash2[st->bucket].lock);
3057
3058 return expected;
3059}
3060
3061static unsigned int bpf_iter_tcp_established_batch(struct seq_file *seq,
3062 struct sock *start_sk)
3063{
3064 struct inet_hashinfo *hinfo = seq_file_net(seq)->ipv4.tcp_death_row.hashinfo;
3065 struct bpf_tcp_iter_state *iter = seq->private;
3066 struct tcp_iter_state *st = &iter->state;
3067 struct hlist_nulls_node *node;
3068 unsigned int expected = 1;
3069 struct sock *sk;
3070
3071 sock_hold(start_sk);
3072 iter->batch[iter->end_sk++] = start_sk;
3073
3074 sk = sk_nulls_next(start_sk);
3075 sk_nulls_for_each_from(sk, node) {
3076 if (seq_sk_match(seq, sk)) {
3077 if (iter->end_sk < iter->max_sk) {
3078 sock_hold(sk);
3079 iter->batch[iter->end_sk++] = sk;
3080 }
3081 expected++;
3082 }
3083 }
3084 spin_unlock_bh(inet_ehash_lockp(hinfo, st->bucket));
3085
3086 return expected;
3087}
3088
3089static struct sock *bpf_iter_tcp_batch(struct seq_file *seq)
3090{
3091 struct inet_hashinfo *hinfo = seq_file_net(seq)->ipv4.tcp_death_row.hashinfo;
3092 struct bpf_tcp_iter_state *iter = seq->private;
3093 struct tcp_iter_state *st = &iter->state;
3094 unsigned int expected;
3095 bool resized = false;
3096 struct sock *sk;
3097
3098 /* The st->bucket is done. Directly advance to the next
3099 	 * bucket instead of having tcp_seek_last_pos() skip entries
3100 	 * one by one in the current bucket only to find out it has
3101 	 * to advance to the next bucket.
3102 */
3103 if (iter->st_bucket_done) {
3104 st->offset = 0;
3105 st->bucket++;
3106 if (st->state == TCP_SEQ_STATE_LISTENING &&
3107 st->bucket > hinfo->lhash2_mask) {
3108 st->state = TCP_SEQ_STATE_ESTABLISHED;
3109 st->bucket = 0;
3110 }
3111 }
3112
3113again:
3114 /* Get a new batch */
3115 iter->cur_sk = 0;
3116 iter->end_sk = 0;
3117 iter->st_bucket_done = false;
3118
3119 sk = tcp_seek_last_pos(seq);
3120 if (!sk)
3121 return NULL; /* Done */
3122
3123 if (st->state == TCP_SEQ_STATE_LISTENING)
3124 expected = bpf_iter_tcp_listening_batch(seq, sk);
3125 else
3126 expected = bpf_iter_tcp_established_batch(seq, sk);
3127
3128 if (iter->end_sk == expected) {
3129 iter->st_bucket_done = true;
3130 return sk;
3131 }
3132
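	/* The bucket held more sockets than the batch could carry: grow the
	 * batch (with some headroom) and rescan the bucket once.
	 */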
3133 if (!resized && !bpf_iter_tcp_realloc_batch(iter, expected * 3 / 2)) {
3134 resized = true;
3135 goto again;
3136 }
3137
3138 return sk;
3139}
3140
3141static void *bpf_iter_tcp_seq_start(struct seq_file *seq, loff_t *pos)
3142{
3143 /* bpf iter does not support lseek, so it always
3144 	 * continues from where it was stop()-ped.
3145 */
3146 if (*pos)
3147 return bpf_iter_tcp_batch(seq);
3148
3149 return SEQ_START_TOKEN;
3150}
3151
3152static void *bpf_iter_tcp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
3153{
3154 struct bpf_tcp_iter_state *iter = seq->private;
3155 struct tcp_iter_state *st = &iter->state;
3156 struct sock *sk;
3157
3158 	/* Whenever seq_next() is called, the sk at iter->cur_sk is
3159 	 * done with seq_show(), so advance to the next sk in
3160 * the batch.
3161 */
3162 if (iter->cur_sk < iter->end_sk) {
3163 /* Keeping st->num consistent in tcp_iter_state.
3164 * bpf_iter_tcp does not use st->num.
3165 * meta.seq_num is used instead.
3166 */
3167 st->num++;
3168 /* Move st->offset to the next sk in the bucket such that
3169 * the future start() will resume at st->offset in
3170 * st->bucket. See tcp_seek_last_pos().
3171 */
3172 st->offset++;
3173 sock_gen_put(iter->batch[iter->cur_sk++]);
3174 }
3175
3176 if (iter->cur_sk < iter->end_sk)
3177 sk = iter->batch[iter->cur_sk];
3178 else
3179 sk = bpf_iter_tcp_batch(seq);
3180
3181 ++*pos;
3182 /* Keeping st->last_pos consistent in tcp_iter_state.
3183 	 * bpf iter does not do lseek, so st->last_pos always equals *pos.
3184 */
3185 st->last_pos = *pos;
3186 return sk;
3187}
3188
3189static int bpf_iter_tcp_seq_show(struct seq_file *seq, void *v)
3190{
3191 struct bpf_iter_meta meta;
3192 struct bpf_prog *prog;
3193 struct sock *sk = v;
3194 uid_t uid;
3195 int ret;
3196
3197 if (v == SEQ_START_TOKEN)
3198 return 0;
3199
3200 if (sk_fullsock(sk))
3201 lock_sock(sk);
3202
3203 if (unlikely(sk_unhashed(sk))) {
3204 ret = SEQ_SKIP;
3205 goto unlock;
3206 }
3207
3208 if (sk->sk_state == TCP_TIME_WAIT) {
3209 uid = 0;
3210 } else if (sk->sk_state == TCP_NEW_SYN_RECV) {
3211 const struct request_sock *req = v;
3212
3213 uid = from_kuid_munged(seq_user_ns(seq),
3214 sock_i_uid(req->rsk_listener));
3215 } else {
3216 uid = from_kuid_munged(seq_user_ns(seq), sock_i_uid(sk));
3217 }
3218
3219 meta.seq = seq;
3220 prog = bpf_iter_get_info(&meta, false);
3221 ret = tcp_prog_seq_show(prog, &meta, v, uid);
3222
3223unlock:
3224 if (sk_fullsock(sk))
3225 release_sock(sk);
3226 return ret;
3227
3228}
3229
3230static void bpf_iter_tcp_seq_stop(struct seq_file *seq, void *v)
3231{
3232 struct bpf_tcp_iter_state *iter = seq->private;
3233 struct bpf_iter_meta meta;
3234 struct bpf_prog *prog;
3235
3236 if (!v) {
3237 meta.seq = seq;
3238 prog = bpf_iter_get_info(&meta, true);
3239 if (prog)
3240 (void)tcp_prog_seq_show(prog, &meta, v, 0);
3241 }
3242
3243 if (iter->cur_sk < iter->end_sk) {
3244 bpf_iter_tcp_put_batch(iter);
3245 iter->st_bucket_done = false;
3246 }
3247}
3248
3249static const struct seq_operations bpf_iter_tcp_seq_ops = {
3250 .show = bpf_iter_tcp_seq_show,
3251 .start = bpf_iter_tcp_seq_start,
3252 .next = bpf_iter_tcp_seq_next,
3253 .stop = bpf_iter_tcp_seq_stop,
3254};
3255#endif
3256static unsigned short seq_file_family(const struct seq_file *seq)
3257{
3258 const struct tcp_seq_afinfo *afinfo;
3259
3260#ifdef CONFIG_BPF_SYSCALL
3261 	/* Iterated from bpf_iter. Let the bpf prog filter instead. */
3262 if (seq->op == &bpf_iter_tcp_seq_ops)
3263 return AF_UNSPEC;
3264#endif
3265
3266 /* Iterated from proc fs */
3267 afinfo = pde_data(file_inode(seq->file));
3268 return afinfo->family;
3269}
3270
3271static const struct seq_operations tcp4_seq_ops = {
3272 .show = tcp4_seq_show,
3273 .start = tcp_seq_start,
3274 .next = tcp_seq_next,
3275 .stop = tcp_seq_stop,
3276};
3277
3278static struct tcp_seq_afinfo tcp4_seq_afinfo = {
3279 .family = AF_INET,
3280};
3281
3282static int __net_init tcp4_proc_init_net(struct net *net)
3283{
3284 if (!proc_create_net_data("tcp", 0444, net->proc_net, &tcp4_seq_ops,
3285 sizeof(struct tcp_iter_state), &tcp4_seq_afinfo))
3286 return -ENOMEM;
3287 return 0;
3288}
3289
3290static void __net_exit tcp4_proc_exit_net(struct net *net)
3291{
3292 remove_proc_entry("tcp", net->proc_net);
3293}
3294
3295static struct pernet_operations tcp4_net_ops = {
3296 .init = tcp4_proc_init_net,
3297 .exit = tcp4_proc_exit_net,
3298};
3299
3300int __init tcp4_proc_init(void)
3301{
3302 return register_pernet_subsys(&tcp4_net_ops);
3303}
3304
3305void tcp4_proc_exit(void)
3306{
3307 unregister_pernet_subsys(&tcp4_net_ops);
3308}
3309#endif /* CONFIG_PROC_FS */
3310
3311/* @wake is one when sk_stream_write_space() calls us.
3312 * This sends EPOLLOUT only if notsent_bytes is less than half the limit.
3313 * This mimics the strategy used in sock_def_write_space().
3314 */
3315bool tcp_stream_memory_free(const struct sock *sk, int wake)
3316{
3317 const struct tcp_sock *tp = tcp_sk(sk);
3318 u32 notsent_bytes = READ_ONCE(tp->write_seq) -
3319 READ_ONCE(tp->snd_nxt);
3320
3321 return (notsent_bytes << wake) < tcp_notsent_lowat(tp);
3322}
3323EXPORT_SYMBOL(tcp_stream_memory_free);
3324
3325struct proto tcp_prot = {
3326 .name = "TCP",
3327 .owner = THIS_MODULE,
3328 .close = tcp_close,
3329 .pre_connect = tcp_v4_pre_connect,
3330 .connect = tcp_v4_connect,
3331 .disconnect = tcp_disconnect,
3332 .accept = inet_csk_accept,
3333 .ioctl = tcp_ioctl,
3334 .init = tcp_v4_init_sock,
3335 .destroy = tcp_v4_destroy_sock,
3336 .shutdown = tcp_shutdown,
3337 .setsockopt = tcp_setsockopt,
3338 .getsockopt = tcp_getsockopt,
3339 .bpf_bypass_getsockopt = tcp_bpf_bypass_getsockopt,
3340 .keepalive = tcp_set_keepalive,
3341 .recvmsg = tcp_recvmsg,
3342 .sendmsg = tcp_sendmsg,
3343 .splice_eof = tcp_splice_eof,
3344 .backlog_rcv = tcp_v4_do_rcv,
3345 .release_cb = tcp_release_cb,
3346 .hash = inet_hash,
3347 .unhash = inet_unhash,
3348 .get_port = inet_csk_get_port,
3349 .put_port = inet_put_port,
3350#ifdef CONFIG_BPF_SYSCALL
3351 .psock_update_sk_prot = tcp_bpf_update_proto,
3352#endif
3353 .enter_memory_pressure = tcp_enter_memory_pressure,
3354 .leave_memory_pressure = tcp_leave_memory_pressure,
3355 .stream_memory_free = tcp_stream_memory_free,
3356 .sockets_allocated = &tcp_sockets_allocated,
3357 .orphan_count = &tcp_orphan_count,
3358
3359 .memory_allocated = &tcp_memory_allocated,
3360 .per_cpu_fw_alloc = &tcp_memory_per_cpu_fw_alloc,
3361
3362 .memory_pressure = &tcp_memory_pressure,
3363 .sysctl_mem = sysctl_tcp_mem,
3364 .sysctl_wmem_offset = offsetof(struct net, ipv4.sysctl_tcp_wmem),
3365 .sysctl_rmem_offset = offsetof(struct net, ipv4.sysctl_tcp_rmem),
3366 .max_header = MAX_TCP_HEADER,
3367 .obj_size = sizeof(struct tcp_sock),
3368 .slab_flags = SLAB_TYPESAFE_BY_RCU,
3369 .twsk_prot = &tcp_timewait_sock_ops,
3370 .rsk_prot = &tcp_request_sock_ops,
3371 .h.hashinfo = NULL,
3372 .no_autobind = true,
3373 .diag_destroy = tcp_abort,
3374};
3375EXPORT_SYMBOL(tcp_prot);
3376
3377static void __net_exit tcp_sk_exit(struct net *net)
3378{
3379 if (net->ipv4.tcp_congestion_control)
3380 bpf_module_put(net->ipv4.tcp_congestion_control,
3381 net->ipv4.tcp_congestion_control->owner);
3382}
3383
3384static void __net_init tcp_set_hashinfo(struct net *net)
3385{
3386 struct inet_hashinfo *hinfo;
3387 unsigned int ehash_entries;
3388 struct net *old_net;
3389
3390 if (net_eq(net, &init_net))
3391 goto fallback;
3392
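	/* A child netns inherits its ehash size from the creating (parent)
	 * namespace's tcp_child_ehash_entries sysctl, read at creation time.
	 */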
3393 old_net = current->nsproxy->net_ns;
3394 ehash_entries = READ_ONCE(old_net->ipv4.sysctl_tcp_child_ehash_entries);
3395 if (!ehash_entries)
3396 goto fallback;
3397
3398 ehash_entries = roundup_pow_of_two(ehash_entries);
3399 hinfo = inet_pernet_hashinfo_alloc(&tcp_hashinfo, ehash_entries);
3400 if (!hinfo) {
3401 pr_warn("Failed to allocate TCP ehash (entries: %u) "
3402 "for a netns, fallback to the global one\n",
3403 ehash_entries);
3404fallback:
3405 hinfo = &tcp_hashinfo;
3406 ehash_entries = tcp_hashinfo.ehash_mask + 1;
3407 }
3408
3409 net->ipv4.tcp_death_row.hashinfo = hinfo;
3410 net->ipv4.tcp_death_row.sysctl_max_tw_buckets = ehash_entries / 2;
3411 net->ipv4.sysctl_max_syn_backlog = max(128U, ehash_entries / 128);
3412}
3413
3414static int __net_init tcp_sk_init(struct net *net)
3415{
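	/* tcp_ecn == 2: accept ECN when requested by incoming connections,
	 * but do not request ECN on outgoing connections.
	 */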
3416 net->ipv4.sysctl_tcp_ecn = 2;
3417 net->ipv4.sysctl_tcp_ecn_fallback = 1;
3418
3419 net->ipv4.sysctl_tcp_base_mss = TCP_BASE_MSS;
3420 net->ipv4.sysctl_tcp_min_snd_mss = TCP_MIN_SND_MSS;
3421 net->ipv4.sysctl_tcp_probe_threshold = TCP_PROBE_THRESHOLD;
3422 net->ipv4.sysctl_tcp_probe_interval = TCP_PROBE_INTERVAL;
3423 net->ipv4.sysctl_tcp_mtu_probe_floor = TCP_MIN_SND_MSS;
3424
3425 net->ipv4.sysctl_tcp_keepalive_time = TCP_KEEPALIVE_TIME;
3426 net->ipv4.sysctl_tcp_keepalive_probes = TCP_KEEPALIVE_PROBES;
3427 net->ipv4.sysctl_tcp_keepalive_intvl = TCP_KEEPALIVE_INTVL;
3428
3429 net->ipv4.sysctl_tcp_syn_retries = TCP_SYN_RETRIES;
3430 net->ipv4.sysctl_tcp_synack_retries = TCP_SYNACK_RETRIES;
3431 net->ipv4.sysctl_tcp_syncookies = 1;
3432 net->ipv4.sysctl_tcp_reordering = TCP_FASTRETRANS_THRESH;
3433 net->ipv4.sysctl_tcp_retries1 = TCP_RETR1;
3434 net->ipv4.sysctl_tcp_retries2 = TCP_RETR2;
3435 net->ipv4.sysctl_tcp_orphan_retries = 0;
3436 net->ipv4.sysctl_tcp_fin_timeout = TCP_FIN_TIMEOUT;
3437 net->ipv4.sysctl_tcp_notsent_lowat = UINT_MAX;
3438 net->ipv4.sysctl_tcp_tw_reuse = 2;
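	/* tcp_tw_reuse == 2: reuse TIME_WAIT sockets only for connections
	 * to loopback addresses by default.
	 */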
3439 net->ipv4.sysctl_tcp_no_ssthresh_metrics_save = 1;
3440
3441 refcount_set(&net->ipv4.tcp_death_row.tw_refcount, 1);
3442 tcp_set_hashinfo(net);
3443
3444 net->ipv4.sysctl_tcp_sack = 1;
3445 net->ipv4.sysctl_tcp_window_scaling = 1;
3446 net->ipv4.sysctl_tcp_timestamps = 1;
3447 net->ipv4.sysctl_tcp_early_retrans = 3;
3448 net->ipv4.sysctl_tcp_recovery = TCP_RACK_LOSS_DETECTION;
3449 net->ipv4.sysctl_tcp_slow_start_after_idle = 1; /* By default, RFC2861 behavior. */
3450 net->ipv4.sysctl_tcp_retrans_collapse = 1;
3451 net->ipv4.sysctl_tcp_max_reordering = 300;
3452 net->ipv4.sysctl_tcp_dsack = 1;
3453 net->ipv4.sysctl_tcp_app_win = 31;
3454 net->ipv4.sysctl_tcp_adv_win_scale = 1;
3455 net->ipv4.sysctl_tcp_frto = 2;
3456 net->ipv4.sysctl_tcp_moderate_rcvbuf = 1;
3457 /* This limits the percentage of the congestion window which we
3458 * will allow a single TSO frame to consume. Building TSO frames
3459 * which are too large can cause TCP streams to be bursty.
3460 */
3461 net->ipv4.sysctl_tcp_tso_win_divisor = 3;
3462 /* Default TSQ limit of 16 TSO segments */
3463 net->ipv4.sysctl_tcp_limit_output_bytes = 16 * 65536;
3464
3465 /* rfc5961 challenge ack rate limiting, per net-ns, disabled by default. */
3466 net->ipv4.sysctl_tcp_challenge_ack_limit = INT_MAX;
3467
3468 net->ipv4.sysctl_tcp_min_tso_segs = 2;
3469 net->ipv4.sysctl_tcp_tso_rtt_log = 9; /* 2^9 = 512 usec */
3470 net->ipv4.sysctl_tcp_min_rtt_wlen = 300;
3471 net->ipv4.sysctl_tcp_autocorking = 1;
3472 net->ipv4.sysctl_tcp_invalid_ratelimit = HZ/2;
3473 net->ipv4.sysctl_tcp_pacing_ss_ratio = 200;
3474 net->ipv4.sysctl_tcp_pacing_ca_ratio = 120;
3475 if (net != &init_net) {
3476 memcpy(net->ipv4.sysctl_tcp_rmem,
3477 init_net.ipv4.sysctl_tcp_rmem,
3478 sizeof(init_net.ipv4.sysctl_tcp_rmem));
3479 memcpy(net->ipv4.sysctl_tcp_wmem,
3480 init_net.ipv4.sysctl_tcp_wmem,
3481 sizeof(init_net.ipv4.sysctl_tcp_wmem));
3482 }
3483 net->ipv4.sysctl_tcp_comp_sack_delay_ns = NSEC_PER_MSEC;
3484 net->ipv4.sysctl_tcp_comp_sack_slack_ns = 100 * NSEC_PER_USEC;
3485 net->ipv4.sysctl_tcp_comp_sack_nr = 44;
3486 net->ipv4.sysctl_tcp_backlog_ack_defer = 1;
3487 net->ipv4.sysctl_tcp_fastopen = TFO_CLIENT_ENABLE;
3488 net->ipv4.sysctl_tcp_fastopen_blackhole_timeout = 0;
3489 atomic_set(&net->ipv4.tfo_active_disable_times, 0);
3490
3491 /* Set default values for PLB */
3492 net->ipv4.sysctl_tcp_plb_enabled = 0; /* Disabled by default */
3493 net->ipv4.sysctl_tcp_plb_idle_rehash_rounds = 3;
3494 net->ipv4.sysctl_tcp_plb_rehash_rounds = 12;
3495 net->ipv4.sysctl_tcp_plb_suspend_rto_sec = 60;
3496 /* Default congestion threshold for PLB to mark a round is 50% */
3497 net->ipv4.sysctl_tcp_plb_cong_thresh = (1 << TCP_PLB_SCALE) / 2;
3498
3499 /* Reno is always built in */
3500 if (!net_eq(net, &init_net) &&
3501 bpf_try_module_get(init_net.ipv4.tcp_congestion_control,
3502 init_net.ipv4.tcp_congestion_control->owner))
3503 net->ipv4.tcp_congestion_control = init_net.ipv4.tcp_congestion_control;
3504 else
3505 net->ipv4.tcp_congestion_control = &tcp_reno;
3506
3507 net->ipv4.sysctl_tcp_syn_linear_timeouts = 4;
3508 net->ipv4.sysctl_tcp_shrink_window = 0;
3509
3510 net->ipv4.sysctl_tcp_pingpong_thresh = 1;
3511
3512 return 0;
3513}
3514
3515static void __net_exit tcp_sk_exit_batch(struct list_head *net_exit_list)
3516{
3517 struct net *net;
3518
3519 tcp_twsk_purge(net_exit_list, AF_INET);
3520
3521 list_for_each_entry(net, net_exit_list, exit_list) {
3522 inet_pernet_hashinfo_free(net->ipv4.tcp_death_row.hashinfo);
3523 WARN_ON_ONCE(!refcount_dec_and_test(&net->ipv4.tcp_death_row.tw_refcount));
3524 tcp_fastopen_ctx_destroy(net);
3525 }
3526}
3527
3528static struct pernet_operations __net_initdata tcp_sk_ops = {
3529 .init = tcp_sk_init,
3530 .exit = tcp_sk_exit,
3531 .exit_batch = tcp_sk_exit_batch,
3532};
3533
3534#if defined(CONFIG_BPF_SYSCALL) && defined(CONFIG_PROC_FS)
3535DEFINE_BPF_ITER_FUNC(tcp, struct bpf_iter_meta *meta,
3536 struct sock_common *sk_common, uid_t uid)
3537
3538#define INIT_BATCH_SZ 16
3539
3540static int bpf_iter_init_tcp(void *priv_data, struct bpf_iter_aux_info *aux)
3541{
3542 struct bpf_tcp_iter_state *iter = priv_data;
3543 int err;
3544
3545 err = bpf_iter_init_seq_net(priv_data, aux);
3546 if (err)
3547 return err;
3548
3549 err = bpf_iter_tcp_realloc_batch(iter, INIT_BATCH_SZ);
3550 if (err) {
3551 bpf_iter_fini_seq_net(priv_data);
3552 return err;
3553 }
3554
3555 return 0;
3556}
3557
3558static void bpf_iter_fini_tcp(void *priv_data)
3559{
3560 struct bpf_tcp_iter_state *iter = priv_data;
3561
3562 bpf_iter_fini_seq_net(priv_data);
3563 kvfree(iter->batch);
3564}
3565
3566static const struct bpf_iter_seq_info tcp_seq_info = {
3567 .seq_ops = &bpf_iter_tcp_seq_ops,
3568 .init_seq_private = bpf_iter_init_tcp,
3569 .fini_seq_private = bpf_iter_fini_tcp,
3570 .seq_priv_size = sizeof(struct bpf_tcp_iter_state),
3571};
3572
3573static const struct bpf_func_proto *
3574bpf_iter_tcp_get_func_proto(enum bpf_func_id func_id,
3575 const struct bpf_prog *prog)
3576{
3577 switch (func_id) {
3578 case BPF_FUNC_setsockopt:
3579 return &bpf_sk_setsockopt_proto;
3580 case BPF_FUNC_getsockopt:
3581 return &bpf_sk_getsockopt_proto;
3582 default:
3583 return NULL;
3584 }
3585}
3586
3587static struct bpf_iter_reg tcp_reg_info = {
3588 .target = "tcp",
3589 .ctx_arg_info_size = 1,
3590 .ctx_arg_info = {
3591 { offsetof(struct bpf_iter__tcp, sk_common),
3592 PTR_TO_BTF_ID_OR_NULL | PTR_TRUSTED },
3593 },
3594 .get_func_proto = bpf_iter_tcp_get_func_proto,
3595 .seq_info = &tcp_seq_info,
3596};
3597
3598static void __init bpf_iter_register(void)
3599{
3600 tcp_reg_info.ctx_arg_info[0].btf_id = btf_sock_ids[BTF_SOCK_TYPE_SOCK_COMMON];
3601 if (bpf_iter_reg_target(&tcp_reg_info))
3602 pr_warn("Warning: could not register bpf iterator tcp\n");
3603}
3604
3605#endif
3606
3607void __init tcp_v4_init(void)
3608{
3609 int cpu, res;
3610
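	/* One control socket per possible CPU: these are used by
	 * tcp_v4_send_reset() and the other reply paths to transmit RST/ACK
	 * packets from softirq context without sharing one socket across CPUs.
	 */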
3611 for_each_possible_cpu(cpu) {
3612 struct sock *sk;
3613
3614 res = inet_ctl_sock_create(&sk, PF_INET, SOCK_RAW,
3615 IPPROTO_TCP, &init_net);
3616 if (res)
3617 panic("Failed to create the TCP control socket.\n");
3618 sock_set_flag(sk, SOCK_USE_WRITE_QUEUE);
3619
3620 /* Please enforce IP_DF and IPID==0 for RST and
3621 * ACK sent in SYN-RECV and TIME-WAIT state.
3622 */
3623 inet_sk(sk)->pmtudisc = IP_PMTUDISC_DO;
3624
3625 per_cpu(ipv4_tcp_sk, cpu) = sk;
3626 }
3627 if (register_pernet_subsys(&tcp_sk_ops))
3628 panic("Failed to create the TCP control socket.\n");
3629
3630#if defined(CONFIG_BPF_SYSCALL) && defined(CONFIG_PROC_FS)
3631 bpf_iter_register();
3632#endif
3633}
1/*
2 * INET An implementation of the TCP/IP protocol suite for the LINUX
3 * operating system. INET is implemented using the BSD Socket
4 * interface as the means of communication with the user level.
5 *
6 * Implementation of the Transmission Control Protocol(TCP).
7 *
8 * IPv4 specific functions
9 *
10 *
11 * code split from:
12 * linux/ipv4/tcp.c
13 * linux/ipv4/tcp_input.c
14 * linux/ipv4/tcp_output.c
15 *
16 * See tcp.c for author information
17 *
18 * This program is free software; you can redistribute it and/or
19 * modify it under the terms of the GNU General Public License
20 * as published by the Free Software Foundation; either version
21 * 2 of the License, or (at your option) any later version.
22 */
23
24/*
25 * Changes:
26 * David S. Miller : New socket lookup architecture.
27 * This code is dedicated to John Dyson.
28 * David S. Miller : Change semantics of established hash,
29 * half is devoted to TIME_WAIT sockets
30 * and the rest go in the other half.
31 * Andi Kleen : Add support for syncookies and fixed
32 * some bugs: ip options weren't passed to
33 * the TCP layer, missed a check for an
34 * ACK bit.
35 * Andi Kleen : Implemented fast path mtu discovery.
36 * Fixed many serious bugs in the
37 * request_sock handling and moved
38 * most of it into the af independent code.
39 * Added tail drop and some other bugfixes.
40 * Added new listen semantics.
41 * Mike McLagan : Routing by source
42 * Juan Jose Ciarlante: ip_dynaddr bits
43 * Andi Kleen: various fixes.
44 * Vitaly E. Lavrov : Transparent proxy revived after year
45 * coma.
46 * Andi Kleen : Fix new listen.
47 * Andi Kleen : Fix accept error reporting.
48 * YOSHIFUJI Hideaki @USAGI and: Support IPV6_V6ONLY socket option, which
49 * Alexey Kuznetsov allow both IPv4 and IPv6 sockets to bind
50 * a single port at the same time.
51 */
52
53#define pr_fmt(fmt) "TCP: " fmt
54
55#include <linux/bottom_half.h>
56#include <linux/types.h>
57#include <linux/fcntl.h>
58#include <linux/module.h>
59#include <linux/random.h>
60#include <linux/cache.h>
61#include <linux/jhash.h>
62#include <linux/init.h>
63#include <linux/times.h>
64#include <linux/slab.h>
65
66#include <net/net_namespace.h>
67#include <net/icmp.h>
68#include <net/inet_hashtables.h>
69#include <net/tcp.h>
70#include <net/transp_v6.h>
71#include <net/ipv6.h>
72#include <net/inet_common.h>
73#include <net/timewait_sock.h>
74#include <net/xfrm.h>
75#include <net/secure_seq.h>
76#include <net/busy_poll.h>
77
78#include <linux/inet.h>
79#include <linux/ipv6.h>
80#include <linux/stddef.h>
81#include <linux/proc_fs.h>
82#include <linux/seq_file.h>
83#include <linux/inetdevice.h>
84
85#include <crypto/hash.h>
86#include <linux/scatterlist.h>
87
88#include <trace/events/tcp.h>
89
90#ifdef CONFIG_TCP_MD5SIG
91static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
92 __be32 daddr, __be32 saddr, const struct tcphdr *th);
93#endif
94
95struct inet_hashinfo tcp_hashinfo;
96EXPORT_SYMBOL(tcp_hashinfo);
97
98static u32 tcp_v4_init_seq(const struct sk_buff *skb)
99{
100 return secure_tcp_seq(ip_hdr(skb)->daddr,
101 ip_hdr(skb)->saddr,
102 tcp_hdr(skb)->dest,
103 tcp_hdr(skb)->source);
104}
105
106static u32 tcp_v4_init_ts_off(const struct net *net, const struct sk_buff *skb)
107{
108 return secure_tcp_ts_off(net, ip_hdr(skb)->daddr, ip_hdr(skb)->saddr);
109}
110
111int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp)
112{
113 const struct tcp_timewait_sock *tcptw = tcp_twsk(sktw);
114 struct tcp_sock *tp = tcp_sk(sk);
115
116 /* With PAWS, it is safe from the viewpoint
117 of data integrity. Even without PAWS it is safe provided sequence
118 spaces do not overlap i.e. at data rates <= 80Mbit/sec.
119
120 Actually, the idea is close to VJ's one, only timestamp cache is
121 held not per host, but per port pair and TW bucket is used as state
122 holder.
123
124 If TW bucket has been already destroyed we fall back to VJ's scheme
125 and use initial timestamp retrieved from peer table.
126 */
127 if (tcptw->tw_ts_recent_stamp &&
128 (!twp || (sock_net(sk)->ipv4.sysctl_tcp_tw_reuse &&
129 get_seconds() - tcptw->tw_ts_recent_stamp > 1))) {
130 tp->write_seq = tcptw->tw_snd_nxt + 65535 + 2;
131 if (tp->write_seq == 0)
132 tp->write_seq = 1;
133 tp->rx_opt.ts_recent = tcptw->tw_ts_recent;
134 tp->rx_opt.ts_recent_stamp = tcptw->tw_ts_recent_stamp;
135 sock_hold(sktw);
136 return 1;
137 }
138
139 return 0;
140}
141EXPORT_SYMBOL_GPL(tcp_twsk_unique);
142
143static int tcp_v4_pre_connect(struct sock *sk, struct sockaddr *uaddr,
144 int addr_len)
145{
146 /* This check is replicated from tcp_v4_connect() and intended to
147 * prevent BPF program called below from accessing bytes that are out
148 * of the bound specified by user in addr_len.
149 */
150 if (addr_len < sizeof(struct sockaddr_in))
151 return -EINVAL;
152
153 sock_owned_by_me(sk);
154
155 return BPF_CGROUP_RUN_PROG_INET4_CONNECT(sk, uaddr);
156}
157
158/* This will initiate an outgoing connection. */
159int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
160{
161 struct sockaddr_in *usin = (struct sockaddr_in *)uaddr;
162 struct inet_sock *inet = inet_sk(sk);
163 struct tcp_sock *tp = tcp_sk(sk);
164 __be16 orig_sport, orig_dport;
165 __be32 daddr, nexthop;
166 struct flowi4 *fl4;
167 struct rtable *rt;
168 int err;
169 struct ip_options_rcu *inet_opt;
170 struct inet_timewait_death_row *tcp_death_row = &sock_net(sk)->ipv4.tcp_death_row;
171
172 if (addr_len < sizeof(struct sockaddr_in))
173 return -EINVAL;
174
175 if (usin->sin_family != AF_INET)
176 return -EAFNOSUPPORT;
177
178 nexthop = daddr = usin->sin_addr.s_addr;
179 inet_opt = rcu_dereference_protected(inet->inet_opt,
180 lockdep_sock_is_held(sk));
181 if (inet_opt && inet_opt->opt.srr) {
182 if (!daddr)
183 return -EINVAL;
184 nexthop = inet_opt->opt.faddr;
185 }
186
187 orig_sport = inet->inet_sport;
188 orig_dport = usin->sin_port;
189 fl4 = &inet->cork.fl.u.ip4;
190 rt = ip_route_connect(fl4, nexthop, inet->inet_saddr,
191 RT_CONN_FLAGS(sk), sk->sk_bound_dev_if,
192 IPPROTO_TCP,
193 orig_sport, orig_dport, sk);
194 if (IS_ERR(rt)) {
195 err = PTR_ERR(rt);
196 if (err == -ENETUNREACH)
197 IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTNOROUTES);
198 return err;
199 }
200
201 if (rt->rt_flags & (RTCF_MULTICAST | RTCF_BROADCAST)) {
202 ip_rt_put(rt);
203 return -ENETUNREACH;
204 }
205
206 if (!inet_opt || !inet_opt->opt.srr)
207 daddr = fl4->daddr;
208
209 if (!inet->inet_saddr)
210 inet->inet_saddr = fl4->saddr;
211 sk_rcv_saddr_set(sk, inet->inet_saddr);
212
213 if (tp->rx_opt.ts_recent_stamp && inet->inet_daddr != daddr) {
214 /* Reset inherited state */
215 tp->rx_opt.ts_recent = 0;
216 tp->rx_opt.ts_recent_stamp = 0;
217 if (likely(!tp->repair))
218 tp->write_seq = 0;
219 }
220
221 inet->inet_dport = usin->sin_port;
222 sk_daddr_set(sk, daddr);
223
224 inet_csk(sk)->icsk_ext_hdr_len = 0;
225 if (inet_opt)
226 inet_csk(sk)->icsk_ext_hdr_len = inet_opt->opt.optlen;
227
228 tp->rx_opt.mss_clamp = TCP_MSS_DEFAULT;
229
230	/* Socket identity is still unknown (sport may be zero).
231	 * However we set state to SYN-SENT and, without releasing the socket
232	 * lock, select a source port, enter ourselves into the hash tables and
233	 * complete initialization after this.
234	 */
235 tcp_set_state(sk, TCP_SYN_SENT);
236 err = inet_hash_connect(tcp_death_row, sk);
237 if (err)
238 goto failure;
239
240 sk_set_txhash(sk);
241
242 rt = ip_route_newports(fl4, rt, orig_sport, orig_dport,
243 inet->inet_sport, inet->inet_dport, sk);
244 if (IS_ERR(rt)) {
245 err = PTR_ERR(rt);
246 rt = NULL;
247 goto failure;
248 }
249 /* OK, now commit destination to socket. */
250 sk->sk_gso_type = SKB_GSO_TCPV4;
251 sk_setup_caps(sk, &rt->dst);
252 rt = NULL;
253
254 if (likely(!tp->repair)) {
255 if (!tp->write_seq)
256 tp->write_seq = secure_tcp_seq(inet->inet_saddr,
257 inet->inet_daddr,
258 inet->inet_sport,
259 usin->sin_port);
260 tp->tsoffset = secure_tcp_ts_off(sock_net(sk),
261 inet->inet_saddr,
262 inet->inet_daddr);
263 }
264
265 inet->inet_id = tp->write_seq ^ jiffies;
266
267 if (tcp_fastopen_defer_connect(sk, &err))
268 return err;
269 if (err)
270 goto failure;
271
272 err = tcp_connect(sk);
273
274 if (err)
275 goto failure;
276
277 return 0;
278
279failure:
280 /*
281 * This unhashes the socket and releases the local port,
282 * if necessary.
283 */
284 tcp_set_state(sk, TCP_CLOSE);
285 ip_rt_put(rt);
286 sk->sk_route_caps = 0;
287 inet->inet_dport = 0;
288 return err;
289}
290EXPORT_SYMBOL(tcp_v4_connect);
291
292/*
293 * This routine reacts to ICMP_FRAG_NEEDED MTU indications as defined in RFC 1191.
294 * It can be called through tcp_release_cb() if the socket was owned by the user
295 * at the time tcp_v4_err() was called to handle the ICMP message.
296 */
297void tcp_v4_mtu_reduced(struct sock *sk)
298{
299 struct inet_sock *inet = inet_sk(sk);
300 struct dst_entry *dst;
301 u32 mtu;
302
303 if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))
304 return;
305 mtu = tcp_sk(sk)->mtu_info;
306 dst = inet_csk_update_pmtu(sk, mtu);
307 if (!dst)
308 return;
309
310	/* Something is about to go wrong... Remember the soft error
311	 * in case this connection is not able to recover.
312	 */
313 if (mtu < dst_mtu(dst) && ip_dont_fragment(sk, dst))
314 sk->sk_err_soft = EMSGSIZE;
315
316 mtu = dst_mtu(dst);
317
318 if (inet->pmtudisc != IP_PMTUDISC_DONT &&
319 ip_sk_accept_pmtu(sk) &&
320 inet_csk(sk)->icsk_pmtu_cookie > mtu) {
321 tcp_sync_mss(sk, mtu);
322
323 /* Resend the TCP packet because it's
324 * clear that the old packet has been
325 * dropped. This is the new "fast" path mtu
326 * discovery.
327 */
328 tcp_simple_retransmit(sk);
329 } /* else let the usual retransmit timer handle it */
330}
331EXPORT_SYMBOL(tcp_v4_mtu_reduced);
332
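/* Propagate an ICMP redirect to the cached route of this socket, if any. */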
333static void do_redirect(struct sk_buff *skb, struct sock *sk)
334{
335 struct dst_entry *dst = __sk_dst_check(sk, 0);
336
337 if (dst)
338 dst->ops->redirect(dst, sk, skb);
339}
340
341
342/* handle ICMP messages on TCP_NEW_SYN_RECV request sockets */
343void tcp_req_err(struct sock *sk, u32 seq, bool abort)
344{
345 struct request_sock *req = inet_reqsk(sk);
346 struct net *net = sock_net(sk);
347
348 /* ICMPs are not backlogged, hence we cannot get
349 * an established socket here.
350 */
351 if (seq != tcp_rsk(req)->snt_isn) {
352 __NET_INC_STATS(net, LINUX_MIB_OUTOFWINDOWICMPS);
353 } else if (abort) {
354 /*
355 * Still in SYN_RECV, just remove it silently.
356 * There is no good way to pass the error to the newly
357 * created socket, and POSIX does not want network
358 * errors returned from accept().
359 */
360 inet_csk_reqsk_queue_drop(req->rsk_listener, req);
361 tcp_listendrop(req->rsk_listener);
362 }
363 reqsk_put(req);
364}
365EXPORT_SYMBOL(tcp_req_err);
366
367/*
368 * This routine is called by the ICMP module when it gets some
369 * sort of error condition. If err < 0, the socket should
370 * be closed and the error returned to the user. If err > 0,
371 * it is just the ICMP type << 8 | ICMP code. After adjustment,
372 * header points to the first 8 bytes of the TCP header. We need
373 * to find the appropriate port.
374 *
375 * The locking strategy used here is very "optimistic". When
376 * someone else accesses the socket, the ICMP is just dropped,
377 * and for some paths there is no check at all.
378 * A more general error queue to queue errors for later handling
379 * would probably be better.
380 *
381 */
382
383void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
384{
385 const struct iphdr *iph = (const struct iphdr *)icmp_skb->data;
386 struct tcphdr *th = (struct tcphdr *)(icmp_skb->data + (iph->ihl << 2));
387 struct inet_connection_sock *icsk;
388 struct tcp_sock *tp;
389 struct inet_sock *inet;
390 const int type = icmp_hdr(icmp_skb)->type;
391 const int code = icmp_hdr(icmp_skb)->code;
392 struct sock *sk;
393 struct sk_buff *skb;
394 struct request_sock *fastopen;
395 u32 seq, snd_una;
396 s32 remaining;
397 u32 delta_us;
398 int err;
399 struct net *net = dev_net(icmp_skb->dev);
400
401 sk = __inet_lookup_established(net, &tcp_hashinfo, iph->daddr,
402 th->dest, iph->saddr, ntohs(th->source),
403 inet_iif(icmp_skb), 0);
404 if (!sk) {
405 __ICMP_INC_STATS(net, ICMP_MIB_INERRORS);
406 return;
407 }
408 if (sk->sk_state == TCP_TIME_WAIT) {
409 inet_twsk_put(inet_twsk(sk));
410 return;
411 }
412 seq = ntohl(th->seq);
413 if (sk->sk_state == TCP_NEW_SYN_RECV)
414 return tcp_req_err(sk, seq,
415 type == ICMP_PARAMETERPROB ||
416 type == ICMP_TIME_EXCEEDED ||
417 (type == ICMP_DEST_UNREACH &&
418 (code == ICMP_NET_UNREACH ||
419 code == ICMP_HOST_UNREACH)));
420
421 bh_lock_sock(sk);
422	/* If too many ICMPs get dropped on busy
423	 * servers, this needs to be solved differently.
424	 * We do take care of the PMTU discovery (RFC 1191) special case:
425	 * we can receive locally generated ICMP messages while the socket is held.
426 */
427 if (sock_owned_by_user(sk)) {
428 if (!(type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED))
429 __NET_INC_STATS(net, LINUX_MIB_LOCKDROPPEDICMPS);
430 }
431 if (sk->sk_state == TCP_CLOSE)
432 goto out;
433
434 if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
435 __NET_INC_STATS(net, LINUX_MIB_TCPMINTTLDROP);
436 goto out;
437 }
438
439 icsk = inet_csk(sk);
440 tp = tcp_sk(sk);
441	/* XXX (TFO) - tp->snd_una should be the ISN (see tcp_create_openreq_child()) */
442 fastopen = tp->fastopen_rsk;
443 snd_una = fastopen ? tcp_rsk(fastopen)->snt_isn : tp->snd_una;
444 if (sk->sk_state != TCP_LISTEN &&
445 !between(seq, snd_una, tp->snd_nxt)) {
446 __NET_INC_STATS(net, LINUX_MIB_OUTOFWINDOWICMPS);
447 goto out;
448 }
449
450 switch (type) {
451 case ICMP_REDIRECT:
452 if (!sock_owned_by_user(sk))
453 do_redirect(icmp_skb, sk);
454 goto out;
455 case ICMP_SOURCE_QUENCH:
456 /* Just silently ignore these. */
457 goto out;
458 case ICMP_PARAMETERPROB:
459 err = EPROTO;
460 break;
461 case ICMP_DEST_UNREACH:
462 if (code > NR_ICMP_UNREACH)
463 goto out;
464
465 if (code == ICMP_FRAG_NEEDED) { /* PMTU discovery (RFC1191) */
466			/* We are not interested in TCP_LISTEN and open_requests
467			 * (SYN-ACKs sent out by Linux are always < 576 bytes, so
468			 * they should go through unfragmented).
469			 */
470 if (sk->sk_state == TCP_LISTEN)
471 goto out;
472
473 tp->mtu_info = info;
474 if (!sock_owned_by_user(sk)) {
475 tcp_v4_mtu_reduced(sk);
476 } else {
477 if (!test_and_set_bit(TCP_MTU_REDUCED_DEFERRED, &sk->sk_tsq_flags))
478 sock_hold(sk);
479 }
480 goto out;
481 }
482
483 err = icmp_err_convert[code].errno;
484 /* check if icmp_skb allows revert of backoff
485 * (see draft-zimmermann-tcp-lcd) */
486 if (code != ICMP_NET_UNREACH && code != ICMP_HOST_UNREACH)
487 break;
488 if (seq != tp->snd_una || !icsk->icsk_retransmits ||
489 !icsk->icsk_backoff || fastopen)
490 break;
491
492 if (sock_owned_by_user(sk))
493 break;
494
495 icsk->icsk_backoff--;
496 icsk->icsk_rto = tp->srtt_us ? __tcp_set_rto(tp) :
497 TCP_TIMEOUT_INIT;
498 icsk->icsk_rto = inet_csk_rto_backoff(icsk, TCP_RTO_MAX);
499
500 skb = tcp_rtx_queue_head(sk);
501 BUG_ON(!skb);
502
503 tcp_mstamp_refresh(tp);
504 delta_us = (u32)(tp->tcp_mstamp - skb->skb_mstamp);
505 remaining = icsk->icsk_rto -
506 usecs_to_jiffies(delta_us);
507
508 if (remaining > 0) {
509 inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
510 remaining, TCP_RTO_MAX);
511 } else {
512 /* RTO revert clocked out retransmission.
513 * Will retransmit now */
514 tcp_retransmit_timer(sk);
515 }
516
517 break;
518 case ICMP_TIME_EXCEEDED:
519 err = EHOSTUNREACH;
520 break;
521 default:
522 goto out;
523 }
524
525 switch (sk->sk_state) {
526 case TCP_SYN_SENT:
527 case TCP_SYN_RECV:
528		/* Only in fast or simultaneous open. If a fast open socket
529		 * is already accepted, it is treated as a connected one below.
530		 */
531 if (fastopen && !fastopen->sk)
532 break;
533
534 if (!sock_owned_by_user(sk)) {
535 sk->sk_err = err;
536
537 sk->sk_error_report(sk);
538
539 tcp_done(sk);
540 } else {
541 sk->sk_err_soft = err;
542 }
543 goto out;
544 }
545
546 /* If we've already connected we will keep trying
547 * until we time out, or the user gives up.
548 *
549	 * RFC 1122 4.2.3.9 allows only PROTO_UNREACH and PORT_UNREACH
550	 * to be considered hard errors (well, FRAG_FAILED too,
551	 * but it is obsoleted by PMTU discovery).
552	 *
553	 * Note that in the modern internet, where routing is unreliable
554	 * and broken firewalls sit in every dark corner sending random
555	 * errors on their masters' orders, even these two messages have lost
556	 * their original sense (even Linux sends invalid PORT_UNREACHs).
557 *
558 * Now we are in compliance with RFCs.
559 * --ANK (980905)
560 */
561
562 inet = inet_sk(sk);
563 if (!sock_owned_by_user(sk) && inet->recverr) {
564 sk->sk_err = err;
565 sk->sk_error_report(sk);
566 } else { /* Only an error on timeout */
567 sk->sk_err_soft = err;
568 }
569
570out:
571 bh_unlock_sock(sk);
572 sock_put(sk);
573}
574
575void __tcp_v4_send_check(struct sk_buff *skb, __be32 saddr, __be32 daddr)
576{
577 struct tcphdr *th = tcp_hdr(skb);
578
579 th->check = ~tcp_v4_check(skb->len, saddr, daddr, 0);
580 skb->csum_start = skb_transport_header(skb) - skb->head;
581 skb->csum_offset = offsetof(struct tcphdr, check);
582}
583
584/* This routine computes an IPv4 TCP checksum. */
585void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb)
586{
587 const struct inet_sock *inet = inet_sk(sk);
588
589 __tcp_v4_send_check(skb, inet->inet_saddr, inet->inet_daddr);
590}
591EXPORT_SYMBOL(tcp_v4_send_check);
592
593/*
594 * This routine will send an RST to the other tcp.
595 *
596 * Someone asks: why do I NEVER use socket parameters (TOS, TTL etc.)
597 * for the reset?
598 * Answer: if a packet caused the RST, it is not for a socket
599 * existing in our system; if it did match a socket,
600 * it is just a duplicate segment or a bug in the other side's TCP.
601 * So we build the reply based only on the parameters
602 * that arrived with the segment.
603 * Exception: precedence violation. We do not implement it in any case.
604 */
605
606static void tcp_v4_send_reset(const struct sock *sk, struct sk_buff *skb)
607{
608 const struct tcphdr *th = tcp_hdr(skb);
609 struct {
610 struct tcphdr th;
611#ifdef CONFIG_TCP_MD5SIG
612 __be32 opt[(TCPOLEN_MD5SIG_ALIGNED >> 2)];
613#endif
614 } rep;
615 struct ip_reply_arg arg;
616#ifdef CONFIG_TCP_MD5SIG
617 struct tcp_md5sig_key *key = NULL;
618 const __u8 *hash_location = NULL;
619 unsigned char newhash[16];
620 int genhash;
621 struct sock *sk1 = NULL;
622#endif
623 struct net *net;
624
625 /* Never send a reset in response to a reset. */
626 if (th->rst)
627 return;
628
629	/* If sk is not NULL, it means we did a successful lookup and the incoming
630	 * route had to be correct. prequeue might have dropped our dst.
631	 */
632 if (!sk && skb_rtable(skb)->rt_type != RTN_LOCAL)
633 return;
634
635 /* Swap the send and the receive. */
636 memset(&rep, 0, sizeof(rep));
637 rep.th.dest = th->source;
638 rep.th.source = th->dest;
639 rep.th.doff = sizeof(struct tcphdr) / 4;
640 rep.th.rst = 1;
641
642 if (th->ack) {
643 rep.th.seq = th->ack_seq;
644 } else {
645 rep.th.ack = 1;
646 rep.th.ack_seq = htonl(ntohl(th->seq) + th->syn + th->fin +
647 skb->len - (th->doff << 2));
648 }
649
650 memset(&arg, 0, sizeof(arg));
651 arg.iov[0].iov_base = (unsigned char *)&rep;
652 arg.iov[0].iov_len = sizeof(rep.th);
653
654 net = sk ? sock_net(sk) : dev_net(skb_dst(skb)->dev);
655#ifdef CONFIG_TCP_MD5SIG
656 rcu_read_lock();
657 hash_location = tcp_parse_md5sig_option(th);
658 if (sk && sk_fullsock(sk)) {
659 key = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)
660 &ip_hdr(skb)->saddr, AF_INET);
661 } else if (hash_location) {
662		/*
663		 * The active side is lost. Try to find the listening socket via the
664		 * source port, and then find the md5 key via that listening socket.
665		 * We do not lose security here:
666		 * the incoming packet is checked against the md5 hash of the found key,
667		 * and no RST is generated if the md5 hash doesn't match.
668		 */
669 sk1 = __inet_lookup_listener(net, &tcp_hashinfo, NULL, 0,
670 ip_hdr(skb)->saddr,
671 th->source, ip_hdr(skb)->daddr,
672 ntohs(th->source), inet_iif(skb),
673 tcp_v4_sdif(skb));
674 /* don't send rst if it can't find key */
675 if (!sk1)
676 goto out;
677
678 key = tcp_md5_do_lookup(sk1, (union tcp_md5_addr *)
679 &ip_hdr(skb)->saddr, AF_INET);
680 if (!key)
681 goto out;
682
683
684 genhash = tcp_v4_md5_hash_skb(newhash, key, NULL, skb);
685 if (genhash || memcmp(hash_location, newhash, 16) != 0)
686 goto out;
687
688 }
689
690 if (key) {
691 rep.opt[0] = htonl((TCPOPT_NOP << 24) |
692 (TCPOPT_NOP << 16) |
693 (TCPOPT_MD5SIG << 8) |
694 TCPOLEN_MD5SIG);
695 /* Update length and the length the header thinks exists */
696 arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED;
697 rep.th.doff = arg.iov[0].iov_len / 4;
698
699 tcp_v4_md5_hash_hdr((__u8 *) &rep.opt[1],
700 key, ip_hdr(skb)->saddr,
701 ip_hdr(skb)->daddr, &rep.th);
702 }
703#endif
704 arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr,
705 ip_hdr(skb)->saddr, /* XXX */
706 arg.iov[0].iov_len, IPPROTO_TCP, 0);
707 arg.csumoffset = offsetof(struct tcphdr, check) / 2;
708 arg.flags = (sk && inet_sk_transparent(sk)) ? IP_REPLY_ARG_NOSRCCHECK : 0;
709
710	/* When the socket is gone, all binding information is lost, and
711	 * routing might fail in this case. No choice here: if we force the
712	 * input interface, we will misroute in case of an asymmetric route.
713	 */
714 if (sk) {
715 arg.bound_dev_if = sk->sk_bound_dev_if;
716 if (sk_fullsock(sk))
717 trace_tcp_send_reset(sk, skb);
718 }
719
720 BUILD_BUG_ON(offsetof(struct sock, sk_bound_dev_if) !=
721 offsetof(struct inet_timewait_sock, tw_bound_dev_if));
722
723 arg.tos = ip_hdr(skb)->tos;
724 arg.uid = sock_net_uid(net, sk && sk_fullsock(sk) ? sk : NULL);
725 local_bh_disable();
726 ip_send_unicast_reply(*this_cpu_ptr(net->ipv4.tcp_sk),
727 skb, &TCP_SKB_CB(skb)->header.h4.opt,
728 ip_hdr(skb)->saddr, ip_hdr(skb)->daddr,
729 &arg, arg.iov[0].iov_len);
730
731 __TCP_INC_STATS(net, TCP_MIB_OUTSEGS);
732 __TCP_INC_STATS(net, TCP_MIB_OUTRSTS);
733 local_bh_enable();
734
735#ifdef CONFIG_TCP_MD5SIG
736out:
737 rcu_read_unlock();
738#endif
739}
740
741/* The code below, which sends ACKs in SYN-RECV and TIME-WAIT states
742    outside of socket context, is certainly ugly. What can I do?
743 */
744
745static void tcp_v4_send_ack(const struct sock *sk,
746 struct sk_buff *skb, u32 seq, u32 ack,
747 u32 win, u32 tsval, u32 tsecr, int oif,
748 struct tcp_md5sig_key *key,
749 int reply_flags, u8 tos)
750{
751 const struct tcphdr *th = tcp_hdr(skb);
752 struct {
753 struct tcphdr th;
754 __be32 opt[(TCPOLEN_TSTAMP_ALIGNED >> 2)
755#ifdef CONFIG_TCP_MD5SIG
756 + (TCPOLEN_MD5SIG_ALIGNED >> 2)
757#endif
758 ];
759 } rep;
760 struct net *net = sock_net(sk);
761 struct ip_reply_arg arg;
762
763 memset(&rep.th, 0, sizeof(struct tcphdr));
764 memset(&arg, 0, sizeof(arg));
765
766 arg.iov[0].iov_base = (unsigned char *)&rep;
767 arg.iov[0].iov_len = sizeof(rep.th);
768 if (tsecr) {
769 rep.opt[0] = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
770 (TCPOPT_TIMESTAMP << 8) |
771 TCPOLEN_TIMESTAMP);
772 rep.opt[1] = htonl(tsval);
773 rep.opt[2] = htonl(tsecr);
774 arg.iov[0].iov_len += TCPOLEN_TSTAMP_ALIGNED;
775 }
776
777 /* Swap the send and the receive. */
778 rep.th.dest = th->source;
779 rep.th.source = th->dest;
780 rep.th.doff = arg.iov[0].iov_len / 4;
781 rep.th.seq = htonl(seq);
782 rep.th.ack_seq = htonl(ack);
783 rep.th.ack = 1;
784 rep.th.window = htons(win);
785
786#ifdef CONFIG_TCP_MD5SIG
787 if (key) {
788 int offset = (tsecr) ? 3 : 0;
789
790 rep.opt[offset++] = htonl((TCPOPT_NOP << 24) |
791 (TCPOPT_NOP << 16) |
792 (TCPOPT_MD5SIG << 8) |
793 TCPOLEN_MD5SIG);
794 arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED;
795 rep.th.doff = arg.iov[0].iov_len/4;
796
797 tcp_v4_md5_hash_hdr((__u8 *) &rep.opt[offset],
798 key, ip_hdr(skb)->saddr,
799 ip_hdr(skb)->daddr, &rep.th);
800 }
801#endif
802 arg.flags = reply_flags;
803 arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr,
804 ip_hdr(skb)->saddr, /* XXX */
805 arg.iov[0].iov_len, IPPROTO_TCP, 0);
806 arg.csumoffset = offsetof(struct tcphdr, check) / 2;
807 if (oif)
808 arg.bound_dev_if = oif;
809 arg.tos = tos;
810 arg.uid = sock_net_uid(net, sk_fullsock(sk) ? sk : NULL);
811 local_bh_disable();
812 ip_send_unicast_reply(*this_cpu_ptr(net->ipv4.tcp_sk),
813 skb, &TCP_SKB_CB(skb)->header.h4.opt,
814 ip_hdr(skb)->saddr, ip_hdr(skb)->daddr,
815 &arg, arg.iov[0].iov_len);
816
817 __TCP_INC_STATS(net, TCP_MIB_OUTSEGS);
818 local_bh_enable();
819}
820
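/* ACK on behalf of a TIME-WAIT socket: echo the remembered peer timestamp and
 * advertise the saved receive window, right-shifted by the negotiated scale.
 */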
821static void tcp_v4_timewait_ack(struct sock *sk, struct sk_buff *skb)
822{
823 struct inet_timewait_sock *tw = inet_twsk(sk);
824 struct tcp_timewait_sock *tcptw = tcp_twsk(sk);
825
826 tcp_v4_send_ack(sk, skb,
827 tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
828 tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
829 tcp_time_stamp_raw() + tcptw->tw_ts_offset,
830 tcptw->tw_ts_recent,
831 tw->tw_bound_dev_if,
832 tcp_twsk_md5_key(tcptw),
833 tw->tw_transparent ? IP_REPLY_ARG_NOSRCCHECK : 0,
834 tw->tw_tos
835 );
836
837 inet_twsk_put(tw);
838}
839
840static void tcp_v4_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
841 struct request_sock *req)
842{
843 /* sk->sk_state == TCP_LISTEN -> for regular TCP_SYN_RECV
844 * sk->sk_state == TCP_SYN_RECV -> for Fast Open.
845 */
846 u32 seq = (sk->sk_state == TCP_LISTEN) ? tcp_rsk(req)->snt_isn + 1 :
847 tcp_sk(sk)->snd_nxt;
848
849 /* RFC 7323 2.3
850 * The window field (SEG.WND) of every outgoing segment, with the
851 * exception of <SYN> segments, MUST be right-shifted by
852 * Rcv.Wind.Shift bits:
853 */
854 tcp_v4_send_ack(sk, skb, seq,
855 tcp_rsk(req)->rcv_nxt,
856 req->rsk_rcv_wnd >> inet_rsk(req)->rcv_wscale,
857 tcp_time_stamp_raw() + tcp_rsk(req)->ts_off,
858 req->ts_recent,
859 0,
860 tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&ip_hdr(skb)->saddr,
861 AF_INET),
862 inet_rsk(req)->no_srccheck ? IP_REPLY_ARG_NOSRCCHECK : 0,
863 ip_hdr(skb)->tos);
864}
865
866/*
867 * Send a SYN-ACK after having received a SYN.
868 * This still operates on a request_sock only, not on a big
869 * socket.
870 */
871static int tcp_v4_send_synack(const struct sock *sk, struct dst_entry *dst,
872 struct flowi *fl,
873 struct request_sock *req,
874 struct tcp_fastopen_cookie *foc,
875 enum tcp_synack_type synack_type)
876{
877 const struct inet_request_sock *ireq = inet_rsk(req);
878 struct flowi4 fl4;
879 int err = -1;
880 struct sk_buff *skb;
881
882 /* First, grab a route. */
883 if (!dst && (dst = inet_csk_route_req(sk, &fl4, req)) == NULL)
884 return -1;
885
886 skb = tcp_make_synack(sk, dst, req, foc, synack_type);
887
888 if (skb) {
889 __tcp_v4_send_check(skb, ireq->ir_loc_addr, ireq->ir_rmt_addr);
890
891 err = ip_build_and_send_pkt(skb, sk, ireq->ir_loc_addr,
892 ireq->ir_rmt_addr,
893 ireq_opt_deref(ireq));
894 err = net_xmit_eval(err);
895 }
896
897 return err;
898}
899
900/*
901 * IPv4 request_sock destructor.
902 */
903static void tcp_v4_reqsk_destructor(struct request_sock *req)
904{
905 kfree(rcu_dereference_protected(inet_rsk(req)->ireq_opt, 1));
906}
907
908#ifdef CONFIG_TCP_MD5SIG
909/*
910 * RFC2385 MD5 checksumming requires a mapping of
911 * IP address->MD5 Key.
912 * We need to maintain these in the sk structure.
913 */
914
915/* Find the Key structure for an address. */
916struct tcp_md5sig_key *tcp_md5_do_lookup(const struct sock *sk,
917 const union tcp_md5_addr *addr,
918 int family)
919{
920 const struct tcp_sock *tp = tcp_sk(sk);
921 struct tcp_md5sig_key *key;
922 const struct tcp_md5sig_info *md5sig;
923 __be32 mask;
924 struct tcp_md5sig_key *best_match = NULL;
925 bool match;
926
927 /* caller either holds rcu_read_lock() or socket lock */
928 md5sig = rcu_dereference_check(tp->md5sig_info,
929 lockdep_sock_is_held(sk));
930 if (!md5sig)
931 return NULL;
932
933 hlist_for_each_entry_rcu(key, &md5sig->head, node) {
934 if (key->family != family)
935 continue;
936
937 if (family == AF_INET) {
938 mask = inet_make_mask(key->prefixlen);
939 match = (key->addr.a4.s_addr & mask) ==
940 (addr->a4.s_addr & mask);
941#if IS_ENABLED(CONFIG_IPV6)
942 } else if (family == AF_INET6) {
943 match = ipv6_prefix_equal(&key->addr.a6, &addr->a6,
944 key->prefixlen);
945#endif
946 } else {
947 match = false;
948 }
949
950 if (match && (!best_match ||
951 key->prefixlen > best_match->prefixlen))
952 best_match = key;
953 }
954 return best_match;
955}
956EXPORT_SYMBOL(tcp_md5_do_lookup);
957
958static struct tcp_md5sig_key *tcp_md5_do_lookup_exact(const struct sock *sk,
959 const union tcp_md5_addr *addr,
960 int family, u8 prefixlen)
961{
962 const struct tcp_sock *tp = tcp_sk(sk);
963 struct tcp_md5sig_key *key;
964 unsigned int size = sizeof(struct in_addr);
965 const struct tcp_md5sig_info *md5sig;
966
967 /* caller either holds rcu_read_lock() or socket lock */
968 md5sig = rcu_dereference_check(tp->md5sig_info,
969 lockdep_sock_is_held(sk));
970 if (!md5sig)
971 return NULL;
972#if IS_ENABLED(CONFIG_IPV6)
973 if (family == AF_INET6)
974 size = sizeof(struct in6_addr);
975#endif
976 hlist_for_each_entry_rcu(key, &md5sig->head, node) {
977 if (key->family != family)
978 continue;
979 if (!memcmp(&key->addr, addr, size) &&
980 key->prefixlen == prefixlen)
981 return key;
982 }
983 return NULL;
984}
985
986struct tcp_md5sig_key *tcp_v4_md5_lookup(const struct sock *sk,
987 const struct sock *addr_sk)
988{
989 const union tcp_md5_addr *addr;
990
991 addr = (const union tcp_md5_addr *)&addr_sk->sk_daddr;
992 return tcp_md5_do_lookup(sk, addr, AF_INET);
993}
994EXPORT_SYMBOL(tcp_v4_md5_lookup);
995
996/* This can be called on a newly created socket, from other files */
997int tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr,
998 int family, u8 prefixlen, const u8 *newkey, u8 newkeylen,
999 gfp_t gfp)
1000{
1001 /* Add Key to the list */
1002 struct tcp_md5sig_key *key;
1003 struct tcp_sock *tp = tcp_sk(sk);
1004 struct tcp_md5sig_info *md5sig;
1005
1006 key = tcp_md5_do_lookup_exact(sk, addr, family, prefixlen);
1007 if (key) {
1008 /* Pre-existing entry - just update that one. */
1009 memcpy(key->key, newkey, newkeylen);
1010 key->keylen = newkeylen;
1011 return 0;
1012 }
1013
1014 md5sig = rcu_dereference_protected(tp->md5sig_info,
1015 lockdep_sock_is_held(sk));
1016 if (!md5sig) {
1017 md5sig = kmalloc(sizeof(*md5sig), gfp);
1018 if (!md5sig)
1019 return -ENOMEM;
1020
1021 sk_nocaps_add(sk, NETIF_F_GSO_MASK);
1022 INIT_HLIST_HEAD(&md5sig->head);
1023 rcu_assign_pointer(tp->md5sig_info, md5sig);
1024 }
1025
1026 key = sock_kmalloc(sk, sizeof(*key), gfp);
1027 if (!key)
1028 return -ENOMEM;
1029 if (!tcp_alloc_md5sig_pool()) {
1030 sock_kfree_s(sk, key, sizeof(*key));
1031 return -ENOMEM;
1032 }
1033
1034 memcpy(key->key, newkey, newkeylen);
1035 key->keylen = newkeylen;
1036 key->family = family;
1037 key->prefixlen = prefixlen;
1038 memcpy(&key->addr, addr,
1039 (family == AF_INET6) ? sizeof(struct in6_addr) :
1040 sizeof(struct in_addr));
1041 hlist_add_head_rcu(&key->node, &md5sig->head);
1042 return 0;
1043}
1044EXPORT_SYMBOL(tcp_md5_do_add);
1045
1046int tcp_md5_do_del(struct sock *sk, const union tcp_md5_addr *addr, int family,
1047 u8 prefixlen)
1048{
1049 struct tcp_md5sig_key *key;
1050
1051 key = tcp_md5_do_lookup_exact(sk, addr, family, prefixlen);
1052 if (!key)
1053 return -ENOENT;
1054 hlist_del_rcu(&key->node);
1055 atomic_sub(sizeof(*key), &sk->sk_omem_alloc);
1056 kfree_rcu(key, rcu);
1057 return 0;
1058}
1059EXPORT_SYMBOL(tcp_md5_do_del);
1060
1061static void tcp_clear_md5_list(struct sock *sk)
1062{
1063 struct tcp_sock *tp = tcp_sk(sk);
1064 struct tcp_md5sig_key *key;
1065 struct hlist_node *n;
1066 struct tcp_md5sig_info *md5sig;
1067
1068 md5sig = rcu_dereference_protected(tp->md5sig_info, 1);
1069
1070 hlist_for_each_entry_safe(key, n, &md5sig->head, node) {
1071 hlist_del_rcu(&key->node);
1072 atomic_sub(sizeof(*key), &sk->sk_omem_alloc);
1073 kfree_rcu(key, rcu);
1074 }
1075}
1076
1077static int tcp_v4_parse_md5_keys(struct sock *sk, int optname,
1078 char __user *optval, int optlen)
1079{
1080 struct tcp_md5sig cmd;
1081 struct sockaddr_in *sin = (struct sockaddr_in *)&cmd.tcpm_addr;
1082 u8 prefixlen = 32;
1083
1084 if (optlen < sizeof(cmd))
1085 return -EINVAL;
1086
1087 if (copy_from_user(&cmd, optval, sizeof(cmd)))
1088 return -EFAULT;
1089
1090 if (sin->sin_family != AF_INET)
1091 return -EINVAL;
1092
1093 if (optname == TCP_MD5SIG_EXT &&
1094 cmd.tcpm_flags & TCP_MD5SIG_FLAG_PREFIX) {
1095 prefixlen = cmd.tcpm_prefixlen;
1096 if (prefixlen > 32)
1097 return -EINVAL;
1098 }
1099
1100 if (!cmd.tcpm_keylen)
1101 return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin->sin_addr.s_addr,
1102 AF_INET, prefixlen);
1103
1104 if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
1105 return -EINVAL;
1106
1107 return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin->sin_addr.s_addr,
1108 AF_INET, prefixlen, cmd.tcpm_key, cmd.tcpm_keylen,
1109 GFP_KERNEL);
1110}
1111
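/* Feed the IPv4 pseudo-header plus the TCP header (with its checksum field
 * zeroed) into the pending MD5 request; payload and key are hashed by callers.
 */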
1112static int tcp_v4_md5_hash_headers(struct tcp_md5sig_pool *hp,
1113 __be32 daddr, __be32 saddr,
1114 const struct tcphdr *th, int nbytes)
1115{
1116 struct tcp4_pseudohdr *bp;
1117 struct scatterlist sg;
1118 struct tcphdr *_th;
1119
1120 bp = hp->scratch;
1121 bp->saddr = saddr;
1122 bp->daddr = daddr;
1123 bp->pad = 0;
1124 bp->protocol = IPPROTO_TCP;
1125 bp->len = cpu_to_be16(nbytes);
1126
1127 _th = (struct tcphdr *)(bp + 1);
1128 memcpy(_th, th, sizeof(*th));
1129 _th->check = 0;
1130
1131 sg_init_one(&sg, bp, sizeof(*bp) + sizeof(*th));
1132 ahash_request_set_crypt(hp->md5_req, &sg, NULL,
1133 sizeof(*bp) + sizeof(*th));
1134 return crypto_ahash_update(hp->md5_req);
1135}
1136
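/* Compute the RFC 2385 MD5 signature over pseudo-header, TCP header and key
 * only (no payload), as used for replies built from scratch (RST/ACK).
 */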
1137static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
1138 __be32 daddr, __be32 saddr, const struct tcphdr *th)
1139{
1140 struct tcp_md5sig_pool *hp;
1141 struct ahash_request *req;
1142
1143 hp = tcp_get_md5sig_pool();
1144 if (!hp)
1145 goto clear_hash_noput;
1146 req = hp->md5_req;
1147
1148 if (crypto_ahash_init(req))
1149 goto clear_hash;
1150 if (tcp_v4_md5_hash_headers(hp, daddr, saddr, th, th->doff << 2))
1151 goto clear_hash;
1152 if (tcp_md5_hash_key(hp, key))
1153 goto clear_hash;
1154 ahash_request_set_crypt(req, NULL, md5_hash, 0);
1155 if (crypto_ahash_final(req))
1156 goto clear_hash;
1157
1158 tcp_put_md5sig_pool();
1159 return 0;
1160
1161clear_hash:
1162 tcp_put_md5sig_pool();
1163clear_hash_noput:
1164 memset(md5_hash, 0, 16);
1165 return 1;
1166}
1167
1168int tcp_v4_md5_hash_skb(char *md5_hash, const struct tcp_md5sig_key *key,
1169 const struct sock *sk,
1170 const struct sk_buff *skb)
1171{
1172 struct tcp_md5sig_pool *hp;
1173 struct ahash_request *req;
1174 const struct tcphdr *th = tcp_hdr(skb);
1175 __be32 saddr, daddr;
1176
1177 if (sk) { /* valid for establish/request sockets */
1178 saddr = sk->sk_rcv_saddr;
1179 daddr = sk->sk_daddr;
1180 } else {
1181 const struct iphdr *iph = ip_hdr(skb);
1182 saddr = iph->saddr;
1183 daddr = iph->daddr;
1184 }
1185
1186 hp = tcp_get_md5sig_pool();
1187 if (!hp)
1188 goto clear_hash_noput;
1189 req = hp->md5_req;
1190
1191 if (crypto_ahash_init(req))
1192 goto clear_hash;
1193
1194 if (tcp_v4_md5_hash_headers(hp, daddr, saddr, th, skb->len))
1195 goto clear_hash;
1196 if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2))
1197 goto clear_hash;
1198 if (tcp_md5_hash_key(hp, key))
1199 goto clear_hash;
1200 ahash_request_set_crypt(req, NULL, md5_hash, 0);
1201 if (crypto_ahash_final(req))
1202 goto clear_hash;
1203
1204 tcp_put_md5sig_pool();
1205 return 0;
1206
1207clear_hash:
1208 tcp_put_md5sig_pool();
1209clear_hash_noput:
1210 memset(md5_hash, 0, 16);
1211 return 1;
1212}
1213EXPORT_SYMBOL(tcp_v4_md5_hash_skb);
1214
1215#endif
1216
1217/* Called with rcu_read_lock() */
1218static bool tcp_v4_inbound_md5_hash(const struct sock *sk,
1219 const struct sk_buff *skb)
1220{
1221#ifdef CONFIG_TCP_MD5SIG
1222 /*
1223 * This gets called for each TCP segment that arrives
1224 * so we want to be efficient.
1225 * We have 3 drop cases:
1226 * o No MD5 hash and one expected.
1227 * o MD5 hash and we're not expecting one.
1228	 * o MD5 hash and it's wrong.
1229 */
1230 const __u8 *hash_location = NULL;
1231 struct tcp_md5sig_key *hash_expected;
1232 const struct iphdr *iph = ip_hdr(skb);
1233 const struct tcphdr *th = tcp_hdr(skb);
1234 int genhash;
1235 unsigned char newhash[16];
1236
1237 hash_expected = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&iph->saddr,
1238 AF_INET);
1239 hash_location = tcp_parse_md5sig_option(th);
1240
1241 /* We've parsed the options - do we have a hash? */
1242 if (!hash_expected && !hash_location)
1243 return false;
1244
1245 if (hash_expected && !hash_location) {
1246 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
1247 return true;
1248 }
1249
1250 if (!hash_expected && hash_location) {
1251 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
1252 return true;
1253 }
1254
1255 /* Okay, so this is hash_expected and hash_location -
1256 * so we need to calculate the checksum.
1257 */
1258 genhash = tcp_v4_md5_hash_skb(newhash,
1259 hash_expected,
1260 NULL, skb);
1261
1262 if (genhash || memcmp(hash_location, newhash, 16) != 0) {
1263 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5FAILURE);
1264 net_info_ratelimited("MD5 Hash failed for (%pI4, %d)->(%pI4, %d)%s\n",
1265 &iph->saddr, ntohs(th->source),
1266 &iph->daddr, ntohs(th->dest),
1267 genhash ? " tcp_v4_calc_md5_hash failed"
1268 : "");
1269 return true;
1270 }
1271 return false;
1272#endif
1273 return false;
1274}
1275
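/* IPv4-specific init of a request sock: take the address pair from the
 * incoming SYN and save any IP options for the SYN-ACK.
 */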
1276static void tcp_v4_init_req(struct request_sock *req,
1277 const struct sock *sk_listener,
1278 struct sk_buff *skb)
1279{
1280 struct inet_request_sock *ireq = inet_rsk(req);
1281 struct net *net = sock_net(sk_listener);
1282
1283 sk_rcv_saddr_set(req_to_sk(req), ip_hdr(skb)->daddr);
1284 sk_daddr_set(req_to_sk(req), ip_hdr(skb)->saddr);
1285 RCU_INIT_POINTER(ireq->ireq_opt, tcp_v4_save_options(net, skb));
1286}
1287
1288static struct dst_entry *tcp_v4_route_req(const struct sock *sk,
1289 struct flowi *fl,
1290 const struct request_sock *req)
1291{
1292 return inet_csk_route_req(sk, &fl->u.ip4, req);
1293}
1294
1295struct request_sock_ops tcp_request_sock_ops __read_mostly = {
1296 .family = PF_INET,
1297 .obj_size = sizeof(struct tcp_request_sock),
1298 .rtx_syn_ack = tcp_rtx_synack,
1299 .send_ack = tcp_v4_reqsk_send_ack,
1300 .destructor = tcp_v4_reqsk_destructor,
1301 .send_reset = tcp_v4_send_reset,
1302 .syn_ack_timeout = tcp_syn_ack_timeout,
1303};
1304
1305static const struct tcp_request_sock_ops tcp_request_sock_ipv4_ops = {
1306 .mss_clamp = TCP_MSS_DEFAULT,
1307#ifdef CONFIG_TCP_MD5SIG
1308 .req_md5_lookup = tcp_v4_md5_lookup,
1309 .calc_md5_hash = tcp_v4_md5_hash_skb,
1310#endif
1311 .init_req = tcp_v4_init_req,
1312#ifdef CONFIG_SYN_COOKIES
1313 .cookie_init_seq = cookie_v4_init_sequence,
1314#endif
1315 .route_req = tcp_v4_route_req,
1316 .init_seq = tcp_v4_init_seq,
1317 .init_ts_off = tcp_v4_init_ts_off,
1318 .send_synack = tcp_v4_send_synack,
1319};
1320
1321int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
1322{
1323	/* Never answer SYNs sent to broadcast or multicast addresses */
1324 if (skb_rtable(skb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))
1325 goto drop;
1326
1327 return tcp_conn_request(&tcp_request_sock_ops,
1328 &tcp_request_sock_ipv4_ops, sk, skb);
1329
1330drop:
1331 tcp_listendrop(sk);
1332 return 0;
1333}
1334EXPORT_SYMBOL(tcp_v4_conn_request);
1335
1336
1337/*
1338 * The three-way handshake has completed - we got a valid ACK -
1339 * now create the new socket.
1340 */
1341struct sock *tcp_v4_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
1342 struct request_sock *req,
1343 struct dst_entry *dst,
1344 struct request_sock *req_unhash,
1345 bool *own_req)
1346{
1347 struct inet_request_sock *ireq;
1348 struct inet_sock *newinet;
1349 struct tcp_sock *newtp;
1350 struct sock *newsk;
1351#ifdef CONFIG_TCP_MD5SIG
1352 struct tcp_md5sig_key *key;
1353#endif
1354 struct ip_options_rcu *inet_opt;
1355
1356 if (sk_acceptq_is_full(sk))
1357 goto exit_overflow;
1358
1359 newsk = tcp_create_openreq_child(sk, req, skb);
1360 if (!newsk)
1361 goto exit_nonewsk;
1362
1363 newsk->sk_gso_type = SKB_GSO_TCPV4;
1364 inet_sk_rx_dst_set(newsk, skb);
1365
1366 newtp = tcp_sk(newsk);
1367 newinet = inet_sk(newsk);
1368 ireq = inet_rsk(req);
1369 sk_daddr_set(newsk, ireq->ir_rmt_addr);
1370 sk_rcv_saddr_set(newsk, ireq->ir_loc_addr);
1371 newsk->sk_bound_dev_if = ireq->ir_iif;
1372 newinet->inet_saddr = ireq->ir_loc_addr;
1373 inet_opt = rcu_dereference(ireq->ireq_opt);
1374 RCU_INIT_POINTER(newinet->inet_opt, inet_opt);
1375 newinet->mc_index = inet_iif(skb);
1376 newinet->mc_ttl = ip_hdr(skb)->ttl;
1377 newinet->rcv_tos = ip_hdr(skb)->tos;
1378 inet_csk(newsk)->icsk_ext_hdr_len = 0;
1379 if (inet_opt)
1380 inet_csk(newsk)->icsk_ext_hdr_len = inet_opt->opt.optlen;
1381 newinet->inet_id = newtp->write_seq ^ jiffies;
1382
1383 if (!dst) {
1384 dst = inet_csk_route_child_sock(sk, newsk, req);
1385 if (!dst)
1386 goto put_and_exit;
1387 } else {
1388 /* syncookie case : see end of cookie_v4_check() */
1389 }
1390 sk_setup_caps(newsk, dst);
1391
1392 tcp_ca_openreq_child(newsk, dst);
1393
1394 tcp_sync_mss(newsk, dst_mtu(dst));
1395 newtp->advmss = tcp_mss_clamp(tcp_sk(sk), dst_metric_advmss(dst));
1396
1397 tcp_initialize_rcv_mss(newsk);
1398
1399#ifdef CONFIG_TCP_MD5SIG
1400 /* Copy over the MD5 key from the original socket */
1401 key = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&newinet->inet_daddr,
1402 AF_INET);
1403 if (key) {
1404 /*
1405 * We're using one, so create a matching key
1406 * on the newsk structure. If we fail to get
1407 * memory, then we end up not copying the key
1408 * across. Shucks.
1409 */
1410 tcp_md5_do_add(newsk, (union tcp_md5_addr *)&newinet->inet_daddr,
1411 AF_INET, 32, key->key, key->keylen, GFP_ATOMIC);
1412 sk_nocaps_add(newsk, NETIF_F_GSO_MASK);
1413 }
1414#endif
1415
1416 if (__inet_inherit_port(sk, newsk) < 0)
1417 goto put_and_exit;
1418 *own_req = inet_ehash_nolisten(newsk, req_to_sk(req_unhash));
1419 if (likely(*own_req)) {
1420 tcp_move_syn(newtp, req);
1421 ireq->ireq_opt = NULL;
1422 } else {
1423 newinet->inet_opt = NULL;
1424 }
1425 return newsk;
1426
1427exit_overflow:
1428 NET_INC_STATS(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
1429exit_nonewsk:
1430 dst_release(dst);
1431exit:
1432 tcp_listendrop(sk);
1433 return NULL;
1434put_and_exit:
1435 newinet->inet_opt = NULL;
1436 inet_csk_prepare_forced_close(newsk);
1437 tcp_done(newsk);
1438 goto exit;
1439}
1440EXPORT_SYMBOL(tcp_v4_syn_recv_sock);
1441
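/* With syncookies enabled, a non-SYN segment hitting a listener may be the ACK
 * finishing a cookie handshake; cookie_v4_check() then returns either the
 * listener itself, a reconstructed child socket, or NULL on failure.
 */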
1442static struct sock *tcp_v4_cookie_check(struct sock *sk, struct sk_buff *skb)
1443{
1444#ifdef CONFIG_SYN_COOKIES
1445 const struct tcphdr *th = tcp_hdr(skb);
1446
1447 if (!th->syn)
1448 sk = cookie_v4_check(sk, skb);
1449#endif
1450 return sk;
1451}
1452
1453/* The socket must have its spinlock held when we get
1454 * here, unless it is a TCP_LISTEN socket.
1455 *
1456 * We have a potential double-lock case here, so even when
1457 * doing backlog processing we use the BH locking scheme.
1458 * This is because we cannot sleep with the original spinlock
1459 * held.
1460 */
1461int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
1462{
1463 struct sock *rsk;
1464
1465 if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
1466 struct dst_entry *dst = sk->sk_rx_dst;
1467
1468 sock_rps_save_rxhash(sk, skb);
1469 sk_mark_napi_id(sk, skb);
1470 if (dst) {
1471 if (inet_sk(sk)->rx_dst_ifindex != skb->skb_iif ||
1472 !dst->ops->check(dst, 0)) {
1473 dst_release(dst);
1474 sk->sk_rx_dst = NULL;
1475 }
1476 }
1477 tcp_rcv_established(sk, skb, tcp_hdr(skb));
1478 return 0;
1479 }
1480
1481 if (tcp_checksum_complete(skb))
1482 goto csum_err;
1483
1484 if (sk->sk_state == TCP_LISTEN) {
1485 struct sock *nsk = tcp_v4_cookie_check(sk, skb);
1486
1487 if (!nsk)
1488 goto discard;
1489 if (nsk != sk) {
1490 if (tcp_child_process(sk, nsk, skb)) {
1491 rsk = nsk;
1492 goto reset;
1493 }
1494 return 0;
1495 }
1496 } else
1497 sock_rps_save_rxhash(sk, skb);
1498
1499 if (tcp_rcv_state_process(sk, skb)) {
1500 rsk = sk;
1501 goto reset;
1502 }
1503 return 0;
1504
1505reset:
1506 tcp_v4_send_reset(rsk, skb);
1507discard:
1508 kfree_skb(skb);
1509 /* Be careful here. If this function gets more complicated and
1510 * gcc suffers from register pressure on the x86, sk (in %ebx)
1511 * might be destroyed here. This current version compiles correctly,
1512 * but you have been warned.
1513 */
1514 return 0;
1515
1516csum_err:
1517 TCP_INC_STATS(sock_net(sk), TCP_MIB_CSUMERRORS);
1518 TCP_INC_STATS(sock_net(sk), TCP_MIB_INERRS);
1519 goto discard;
1520}
1521EXPORT_SYMBOL(tcp_v4_do_rcv);
1522
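/* Early demux: look up an established socket directly after IP receive so the
 * socket and its cached rx dst can be attached to the skb before routing.
 */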
1523int tcp_v4_early_demux(struct sk_buff *skb)
1524{
1525 const struct iphdr *iph;
1526 const struct tcphdr *th;
1527 struct sock *sk;
1528
1529 if (skb->pkt_type != PACKET_HOST)
1530 return 0;
1531
1532 if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct tcphdr)))
1533 return 0;
1534
1535 iph = ip_hdr(skb);
1536 th = tcp_hdr(skb);
1537
1538 if (th->doff < sizeof(struct tcphdr) / 4)
1539 return 0;
1540
1541 sk = __inet_lookup_established(dev_net(skb->dev), &tcp_hashinfo,
1542 iph->saddr, th->source,
1543 iph->daddr, ntohs(th->dest),
1544 skb->skb_iif, inet_sdif(skb));
1545 if (sk) {
1546 skb->sk = sk;
1547 skb->destructor = sock_edemux;
1548 if (sk_fullsock(sk)) {
1549 struct dst_entry *dst = READ_ONCE(sk->sk_rx_dst);
1550
1551 if (dst)
1552 dst = dst_check(dst, 0);
1553 if (dst &&
1554 inet_sk(sk)->rx_dst_ifindex == skb->skb_iif)
1555 skb_dst_set_noref(skb, dst);
1556 }
1557 }
1558 return 0;
1559}
1560
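/* Queue a segment on the backlog of a socket owned by user context; returns
 * true if the backlog limit was exceeded, in which case the drop is accounted
 * here and the caller frees the skb.
 */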
1561bool tcp_add_backlog(struct sock *sk, struct sk_buff *skb)
1562{
1563 u32 limit = sk->sk_rcvbuf + sk->sk_sndbuf;
1564
1565 /* Only socket owner can try to collapse/prune rx queues
1566 * to reduce memory overhead, so add a little headroom here.
1567	 * Only a few socket backlogs are likely to be non-empty concurrently.
1568 */
1569 limit += 64*1024;
1570
1571 /* In case all data was pulled from skb frags (in __pskb_pull_tail()),
1572 * we can fix skb->truesize to its real value to avoid future drops.
1573 * This is valid because skb is not yet charged to the socket.
1574	 * It has been noticed that pure SACK packets were sometimes dropped
1575	 * (if cooked by drivers without the copybreak feature).
1576 */
1577 skb_condense(skb);
1578
1579 if (unlikely(sk_add_backlog(sk, skb, limit))) {
1580 bh_unlock_sock(sk);
1581 __NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPBACKLOGDROP);
1582 return true;
1583 }
1584 return false;
1585}
1586EXPORT_SYMBOL(tcp_add_backlog);
1587
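/* Run the socket filter; if it trimmed the skb, shrink end_seq so the TCP
 * sequence space still matches the remaining payload.
 */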
1588int tcp_filter(struct sock *sk, struct sk_buff *skb)
1589{
1590 struct tcphdr *th = (struct tcphdr *)skb->data;
1591 unsigned int eaten = skb->len;
1592 int err;
1593
1594 err = sk_filter_trim_cap(sk, skb, th->doff * 4);
1595 if (!err) {
1596 eaten -= skb->len;
1597 TCP_SKB_CB(skb)->end_seq -= eaten;
1598 }
1599 return err;
1600}
1601EXPORT_SYMBOL(tcp_filter);
1602
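/* Undo tcp_v4_fill_cb(): move the IP control block back into place before the
 * skb is handed to another lookup or queued for a different socket.
 */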
1603static void tcp_v4_restore_cb(struct sk_buff *skb)
1604{
1605 memmove(IPCB(skb), &TCP_SKB_CB(skb)->header.h4,
1606 sizeof(struct inet_skb_parm));
1607}
1608
1609static void tcp_v4_fill_cb(struct sk_buff *skb, const struct iphdr *iph,
1610 const struct tcphdr *th)
1611{
1612	/* This is tricky: we move IPCB to its correct location inside TCP_SKB_CB();
1613	 * barrier() makes sure the compiler won't play fool^Waliasing games.
1614	 */
1615 memmove(&TCP_SKB_CB(skb)->header.h4, IPCB(skb),
1616 sizeof(struct inet_skb_parm));
1617 barrier();
1618
1619 TCP_SKB_CB(skb)->seq = ntohl(th->seq);
1620 TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
1621 skb->len - th->doff * 4);
1622 TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
1623 TCP_SKB_CB(skb)->tcp_flags = tcp_flag_byte(th);
1624 TCP_SKB_CB(skb)->tcp_tw_isn = 0;
1625 TCP_SKB_CB(skb)->ip_dsfield = ipv4_get_dsfield(iph);
1626 TCP_SKB_CB(skb)->sacked = 0;
1627 TCP_SKB_CB(skb)->has_rxtstamp =
1628 skb->tstamp || skb_hwtstamps(skb)->hwtstamp;
1629}
1630
1631/*
1632 * From tcp_input.c
1633 */
1634
1635int tcp_v4_rcv(struct sk_buff *skb)
1636{
1637 struct net *net = dev_net(skb->dev);
1638 int sdif = inet_sdif(skb);
1639 const struct iphdr *iph;
1640 const struct tcphdr *th;
1641 bool refcounted;
1642 struct sock *sk;
1643 int ret;
1644
1645 if (skb->pkt_type != PACKET_HOST)
1646 goto discard_it;
1647
1648 /* Count it even if it's bad */
1649 __TCP_INC_STATS(net, TCP_MIB_INSEGS);
1650
1651 if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
1652 goto discard_it;
1653
1654 th = (const struct tcphdr *)skb->data;
1655
1656 if (unlikely(th->doff < sizeof(struct tcphdr) / 4))
1657 goto bad_packet;
1658 if (!pskb_may_pull(skb, th->doff * 4))
1659 goto discard_it;
1660
1661	/* An explanation is required here, I think.
1662	 * Packet length and doff are validated by header prediction,
1663	 * provided the case of th->doff == 0 is eliminated.
1664	 * So, we defer the checks. */
1665
1666 if (skb_checksum_init(skb, IPPROTO_TCP, inet_compute_pseudo))
1667 goto csum_error;
1668
1669 th = (const struct tcphdr *)skb->data;
1670 iph = ip_hdr(skb);
1671lookup:
1672 sk = __inet_lookup_skb(&tcp_hashinfo, skb, __tcp_hdrlen(th), th->source,
1673 th->dest, sdif, &refcounted);
1674 if (!sk)
1675 goto no_tcp_socket;
1676
1677process:
1678 if (sk->sk_state == TCP_TIME_WAIT)
1679 goto do_time_wait;
1680
1681 if (sk->sk_state == TCP_NEW_SYN_RECV) {
1682 struct request_sock *req = inet_reqsk(sk);
1683 bool req_stolen = false;
1684 struct sock *nsk;
1685
1686 sk = req->rsk_listener;
1687 if (unlikely(tcp_v4_inbound_md5_hash(sk, skb))) {
1688 sk_drops_add(sk, skb);
1689 reqsk_put(req);
1690 goto discard_it;
1691 }
1692 if (unlikely(sk->sk_state != TCP_LISTEN)) {
1693 inet_csk_reqsk_queue_drop_and_put(sk, req);
1694 goto lookup;
1695 }
1696 /* We own a reference on the listener, increase it again
1697 * as we might lose it too soon.
1698 */
1699 sock_hold(sk);
1700 refcounted = true;
1701 nsk = NULL;
1702 if (!tcp_filter(sk, skb)) {
1703 th = (const struct tcphdr *)skb->data;
1704 iph = ip_hdr(skb);
1705 tcp_v4_fill_cb(skb, iph, th);
1706 nsk = tcp_check_req(sk, skb, req, false, &req_stolen);
1707 }
1708 if (!nsk) {
1709 reqsk_put(req);
1710 if (req_stolen) {
1711 /* Another cpu got exclusive access to req
1712 * and created a full blown socket.
1713 * Try to feed this packet to this socket
1714 * instead of discarding it.
1715 */
1716 tcp_v4_restore_cb(skb);
1717 sock_put(sk);
1718 goto lookup;
1719 }
1720 goto discard_and_relse;
1721 }
1722 if (nsk == sk) {
1723 reqsk_put(req);
1724 tcp_v4_restore_cb(skb);
1725 } else if (tcp_child_process(sk, nsk, skb)) {
1726 tcp_v4_send_reset(nsk, skb);
1727 goto discard_and_relse;
1728 } else {
1729 sock_put(sk);
1730 return 0;
1731 }
1732 }
1733 if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
1734 __NET_INC_STATS(net, LINUX_MIB_TCPMINTTLDROP);
1735 goto discard_and_relse;
1736 }
1737
1738 if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
1739 goto discard_and_relse;
1740
1741 if (tcp_v4_inbound_md5_hash(sk, skb))
1742 goto discard_and_relse;
1743
1744 nf_reset(skb);
1745
1746 if (tcp_filter(sk, skb))
1747 goto discard_and_relse;
1748 th = (const struct tcphdr *)skb->data;
1749 iph = ip_hdr(skb);
1750 tcp_v4_fill_cb(skb, iph, th);
1751
1752 skb->dev = NULL;
1753
1754 if (sk->sk_state == TCP_LISTEN) {
1755 ret = tcp_v4_do_rcv(sk, skb);
1756 goto put_and_return;
1757 }
1758
1759 sk_incoming_cpu_update(sk);
1760
1761 bh_lock_sock_nested(sk);
1762 tcp_segs_in(tcp_sk(sk), skb);
1763 ret = 0;
1764 if (!sock_owned_by_user(sk)) {
1765 ret = tcp_v4_do_rcv(sk, skb);
1766 } else if (tcp_add_backlog(sk, skb)) {
1767 goto discard_and_relse;
1768 }
1769 bh_unlock_sock(sk);
1770
1771put_and_return:
1772 if (refcounted)
1773 sock_put(sk);
1774
1775 return ret;
1776
1777no_tcp_socket:
1778 if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
1779 goto discard_it;
1780
1781 tcp_v4_fill_cb(skb, iph, th);
1782
1783 if (tcp_checksum_complete(skb)) {
1784csum_error:
1785 __TCP_INC_STATS(net, TCP_MIB_CSUMERRORS);
1786bad_packet:
1787 __TCP_INC_STATS(net, TCP_MIB_INERRS);
1788 } else {
1789 tcp_v4_send_reset(NULL, skb);
1790 }
1791
1792discard_it:
1793 /* Discard frame. */
1794 kfree_skb(skb);
1795 return 0;
1796
1797discard_and_relse:
1798 sk_drops_add(sk, skb);
1799 if (refcounted)
1800 sock_put(sk);
1801 goto discard_it;
1802
1803do_time_wait:
1804 if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) {
1805 inet_twsk_put(inet_twsk(sk));
1806 goto discard_it;
1807 }
1808
1809 tcp_v4_fill_cb(skb, iph, th);
1810
1811 if (tcp_checksum_complete(skb)) {
1812 inet_twsk_put(inet_twsk(sk));
1813 goto csum_error;
1814 }
1815 switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
1816 case TCP_TW_SYN: {
1817 struct sock *sk2 = inet_lookup_listener(dev_net(skb->dev),
1818 &tcp_hashinfo, skb,
1819 __tcp_hdrlen(th),
1820 iph->saddr, th->source,
1821 iph->daddr, th->dest,
1822 inet_iif(skb),
1823 sdif);
1824 if (sk2) {
1825 inet_twsk_deschedule_put(inet_twsk(sk));
1826 sk = sk2;
1827 tcp_v4_restore_cb(skb);
1828 refcounted = false;
1829 goto process;
1830 }
1831 }
1832 /* to ACK */
1833 /* fall through */
1834 case TCP_TW_ACK:
1835 tcp_v4_timewait_ack(sk, skb);
1836 break;
1837 case TCP_TW_RST:
1838 tcp_v4_send_reset(sk, skb);
1839 inet_twsk_deschedule_put(inet_twsk(sk));
1840 goto discard_it;
1841 case TCP_TW_SUCCESS:;
1842 }
1843 goto discard_it;
1844}
1845
1846static struct timewait_sock_ops tcp_timewait_sock_ops = {
1847 .twsk_obj_size = sizeof(struct tcp_timewait_sock),
1848 .twsk_unique = tcp_twsk_unique,
1849 .twsk_destructor= tcp_twsk_destructor,
1850};
1851
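/* Cache this skb's input route on the socket for the established/early-demux
 * fast path, remembering the ingress ifindex it is valid for.
 */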
1852void inet_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
1853{
1854 struct dst_entry *dst = skb_dst(skb);
1855
1856 if (dst && dst_hold_safe(dst)) {
1857 sk->sk_rx_dst = dst;
1858 inet_sk(sk)->rx_dst_ifindex = skb->skb_iif;
1859 }
1860}
1861EXPORT_SYMBOL(inet_sk_rx_dst_set);
1862
1863const struct inet_connection_sock_af_ops ipv4_specific = {
1864 .queue_xmit = ip_queue_xmit,
1865 .send_check = tcp_v4_send_check,
1866 .rebuild_header = inet_sk_rebuild_header,
1867 .sk_rx_dst_set = inet_sk_rx_dst_set,
1868 .conn_request = tcp_v4_conn_request,
1869 .syn_recv_sock = tcp_v4_syn_recv_sock,
1870 .net_header_len = sizeof(struct iphdr),
1871 .setsockopt = ip_setsockopt,
1872 .getsockopt = ip_getsockopt,
1873 .addr2sockaddr = inet_csk_addr2sockaddr,
1874 .sockaddr_len = sizeof(struct sockaddr_in),
1875#ifdef CONFIG_COMPAT
1876 .compat_setsockopt = compat_ip_setsockopt,
1877 .compat_getsockopt = compat_ip_getsockopt,
1878#endif
1879 .mtu_reduced = tcp_v4_mtu_reduced,
1880};
1881EXPORT_SYMBOL(ipv4_specific);
1882
1883#ifdef CONFIG_TCP_MD5SIG
1884static const struct tcp_sock_af_ops tcp_sock_ipv4_specific = {
1885 .md5_lookup = tcp_v4_md5_lookup,
1886 .calc_md5_hash = tcp_v4_md5_hash_skb,
1887 .md5_parse = tcp_v4_parse_md5_keys,
1888};
1889#endif
1890
1891/* NOTE: A lot of things are set to zero explicitly by the call to
1892 * sk_alloc(), so they need not be done here.
1893 */
1894static int tcp_v4_init_sock(struct sock *sk)
1895{
1896 struct inet_connection_sock *icsk = inet_csk(sk);
1897
1898 tcp_init_sock(sk);
1899
1900 icsk->icsk_af_ops = &ipv4_specific;
1901
1902#ifdef CONFIG_TCP_MD5SIG
1903 tcp_sk(sk)->af_specific = &tcp_sock_ipv4_specific;
1904#endif
1905
1906 return 0;
1907}
1908
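/* Per-socket teardown: stop timers and release queues, congestion control,
 * ULP, MD5 keys and Fast Open state before the socket memory is freed.
 */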
1909void tcp_v4_destroy_sock(struct sock *sk)
1910{
1911 struct tcp_sock *tp = tcp_sk(sk);
1912
1913 trace_tcp_destroy_sock(sk);
1914
1915 tcp_clear_xmit_timers(sk);
1916
1917 tcp_cleanup_congestion_control(sk);
1918
1919 tcp_cleanup_ulp(sk);
1920
1921	/* Clean up the write buffer. */
1922 tcp_write_queue_purge(sk);
1923
1924 /* Check if we want to disable active TFO */
1925 tcp_fastopen_active_disable_ofo_check(sk);
1926
1927 /* Cleans up our, hopefully empty, out_of_order_queue. */
1928 skb_rbtree_purge(&tp->out_of_order_queue);
1929
1930#ifdef CONFIG_TCP_MD5SIG
1931 /* Clean up the MD5 key list, if any */
1932 if (tp->md5sig_info) {
1933 tcp_clear_md5_list(sk);
1934 kfree_rcu(rcu_dereference_protected(tp->md5sig_info, 1), rcu);
1935 tp->md5sig_info = NULL;
1936 }
1937#endif
1938
1939 /* Clean up a referenced TCP bind bucket. */
1940 if (inet_csk(sk)->icsk_bind_hash)
1941 inet_put_port(sk);
1942
1943 BUG_ON(tp->fastopen_rsk);
1944
1945 /* If socket is aborted during connect operation */
1946 tcp_free_fastopen_req(tp);
1947 tcp_fastopen_destroy_cipher(sk);
1948 tcp_saved_syn_free(tp);
1949
1950 sk_sockets_allocated_dec(sk);
1951}
1952EXPORT_SYMBOL(tcp_v4_destroy_sock);
1953
1954#ifdef CONFIG_PROC_FS
1955/* Proc filesystem TCP sock list dumping. */
1956
1957/*
1958 * Get the next listener socket following cur. If cur is NULL, get the first
1959 * socket starting from the bucket given in st->bucket; when st->bucket is
1960 * zero, the very first socket in the hash table is returned.
1961 */
1962static void *listening_get_next(struct seq_file *seq, void *cur)
1963{
1964 struct tcp_iter_state *st = seq->private;
1965 struct net *net = seq_file_net(seq);
1966 struct inet_listen_hashbucket *ilb;
1967 struct sock *sk = cur;
1968
1969 if (!sk) {
1970get_head:
1971 ilb = &tcp_hashinfo.listening_hash[st->bucket];
1972 spin_lock(&ilb->lock);
1973 sk = sk_head(&ilb->head);
1974 st->offset = 0;
1975 goto get_sk;
1976 }
1977 ilb = &tcp_hashinfo.listening_hash[st->bucket];
1978 ++st->num;
1979 ++st->offset;
1980
1981 sk = sk_next(sk);
1982get_sk:
1983 sk_for_each_from(sk) {
1984 if (!net_eq(sock_net(sk), net))
1985 continue;
1986 if (sk->sk_family == st->family)
1987 return sk;
1988 }
1989 spin_unlock(&ilb->lock);
1990 st->offset = 0;
1991 if (++st->bucket < INET_LHTABLE_SIZE)
1992 goto get_head;
1993 return NULL;
1994}
1995
1996static void *listening_get_idx(struct seq_file *seq, loff_t *pos)
1997{
1998 struct tcp_iter_state *st = seq->private;
1999 void *rc;
2000
2001 st->bucket = 0;
2002 st->offset = 0;
2003 rc = listening_get_next(seq, NULL);
2004
2005 while (rc && *pos) {
2006 rc = listening_get_next(seq, rc);
2007 --*pos;
2008 }
2009 return rc;
2010}
2011
2012static inline bool empty_bucket(const struct tcp_iter_state *st)
2013{
2014 return hlist_nulls_empty(&tcp_hashinfo.ehash[st->bucket].chain);
2015}
2016
2017/*
2018 * Get first established socket starting from bucket given in st->bucket.
2019 * If st->bucket is zero, the very first socket in the hash is returned.
2020 */
2021static void *established_get_first(struct seq_file *seq)
2022{
2023 struct tcp_iter_state *st = seq->private;
2024 struct net *net = seq_file_net(seq);
2025 void *rc = NULL;
2026
2027 st->offset = 0;
2028 for (; st->bucket <= tcp_hashinfo.ehash_mask; ++st->bucket) {
2029 struct sock *sk;
2030 struct hlist_nulls_node *node;
2031 spinlock_t *lock = inet_ehash_lockp(&tcp_hashinfo, st->bucket);
2032
2033 /* Lockless fast path for the common case of empty buckets */
2034 if (empty_bucket(st))
2035 continue;
2036
2037 spin_lock_bh(lock);
2038 sk_nulls_for_each(sk, node, &tcp_hashinfo.ehash[st->bucket].chain) {
2039 if (sk->sk_family != st->family ||
2040 !net_eq(sock_net(sk), net)) {
2041 continue;
2042 }
2043 rc = sk;
2044 goto out;
2045 }
2046 spin_unlock_bh(lock);
2047 }
2048out:
2049 return rc;
2050}
2051
2052static void *established_get_next(struct seq_file *seq, void *cur)
2053{
2054 struct sock *sk = cur;
2055 struct hlist_nulls_node *node;
2056 struct tcp_iter_state *st = seq->private;
2057 struct net *net = seq_file_net(seq);
2058
2059 ++st->num;
2060 ++st->offset;
2061
2062 sk = sk_nulls_next(sk);
2063
2064 sk_nulls_for_each_from(sk, node) {
2065 if (sk->sk_family == st->family && net_eq(sock_net(sk), net))
2066 return sk;
2067 }
2068
2069 spin_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
2070 ++st->bucket;
2071 return established_get_first(seq);
2072}
2073
2074static void *established_get_idx(struct seq_file *seq, loff_t pos)
2075{
2076 struct tcp_iter_state *st = seq->private;
2077 void *rc;
2078
2079 st->bucket = 0;
2080 rc = established_get_first(seq);
2081
2082 while (rc && pos) {
2083 rc = established_get_next(seq, rc);
2084 --pos;
2085 }
2086 return rc;
2087}
2088
2089static void *tcp_get_idx(struct seq_file *seq, loff_t pos)
2090{
2091 void *rc;
2092 struct tcp_iter_state *st = seq->private;
2093
2094 st->state = TCP_SEQ_STATE_LISTENING;
2095 rc = listening_get_idx(seq, &pos);
2096
2097 if (!rc) {
2098 st->state = TCP_SEQ_STATE_ESTABLISHED;
2099 rc = established_get_idx(seq, pos);
2100 }
2101
2102 return rc;
2103}
2104
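/* Resume a /proc dump at the remembered st->bucket/st->offset in the listening
 * or established hash, instead of rescanning from the start.
 */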
2105static void *tcp_seek_last_pos(struct seq_file *seq)
2106{
2107 struct tcp_iter_state *st = seq->private;
2108 int offset = st->offset;
2109 int orig_num = st->num;
2110 void *rc = NULL;
2111
2112 switch (st->state) {
2113 case TCP_SEQ_STATE_LISTENING:
2114 if (st->bucket >= INET_LHTABLE_SIZE)
2115 break;
2116 st->state = TCP_SEQ_STATE_LISTENING;
2117 rc = listening_get_next(seq, NULL);
2118 while (offset-- && rc)
2119 rc = listening_get_next(seq, rc);
2120 if (rc)
2121 break;
2122 st->bucket = 0;
2123 st->state = TCP_SEQ_STATE_ESTABLISHED;
2124 /* Fallthrough */
2125 case TCP_SEQ_STATE_ESTABLISHED:
2126 if (st->bucket > tcp_hashinfo.ehash_mask)
2127 break;
2128 rc = established_get_first(seq);
2129 while (offset-- && rc)
2130 rc = established_get_next(seq, rc);
2131 }
2132
2133 st->num = orig_num;
2134
2135 return rc;
2136}
2137
2138static void *tcp_seq_start(struct seq_file *seq, loff_t *pos)
2139{
2140 struct tcp_iter_state *st = seq->private;
2141 void *rc;
2142
2143 if (*pos && *pos == st->last_pos) {
2144 rc = tcp_seek_last_pos(seq);
2145 if (rc)
2146 goto out;
2147 }
2148
2149 st->state = TCP_SEQ_STATE_LISTENING;
2150 st->num = 0;
2151 st->bucket = 0;
2152 st->offset = 0;
2153 rc = *pos ? tcp_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
2154
2155out:
2156 st->last_pos = *pos;
2157 return rc;
2158}
2159
2160static void *tcp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2161{
2162 struct tcp_iter_state *st = seq->private;
2163 void *rc = NULL;
2164
2165 if (v == SEQ_START_TOKEN) {
2166 rc = tcp_get_idx(seq, 0);
2167 goto out;
2168 }
2169
2170 switch (st->state) {
2171 case TCP_SEQ_STATE_LISTENING:
2172 rc = listening_get_next(seq, v);
2173 if (!rc) {
2174 st->state = TCP_SEQ_STATE_ESTABLISHED;
2175 st->bucket = 0;
2176 st->offset = 0;
2177 rc = established_get_first(seq);
2178 }
2179 break;
2180 case TCP_SEQ_STATE_ESTABLISHED:
2181 rc = established_get_next(seq, v);
2182 break;
2183 }
2184out:
2185 ++*pos;
2186 st->last_pos = *pos;
2187 return rc;
2188}
2189
2190static void tcp_seq_stop(struct seq_file *seq, void *v)
2191{
2192 struct tcp_iter_state *st = seq->private;
2193
2194 switch (st->state) {
2195 case TCP_SEQ_STATE_LISTENING:
2196 if (v != SEQ_START_TOKEN)
2197 spin_unlock(&tcp_hashinfo.listening_hash[st->bucket].lock);
2198 break;
2199 case TCP_SEQ_STATE_ESTABLISHED:
2200 if (v)
2201 spin_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
2202 break;
2203 }
2204}
2205
2206int tcp_seq_open(struct inode *inode, struct file *file)
2207{
2208 struct tcp_seq_afinfo *afinfo = PDE_DATA(inode);
2209 struct tcp_iter_state *s;
2210 int err;
2211
2212 err = seq_open_net(inode, file, &afinfo->seq_ops,
2213 sizeof(struct tcp_iter_state));
2214 if (err < 0)
2215 return err;
2216
2217 s = ((struct seq_file *)file->private_data)->private;
2218 s->family = afinfo->family;
2219 s->last_pos = 0;
2220 return 0;
2221}
2222EXPORT_SYMBOL(tcp_seq_open);
2223
2224int tcp_proc_register(struct net *net, struct tcp_seq_afinfo *afinfo)
2225{
2226 int rc = 0;
2227 struct proc_dir_entry *p;
2228
2229 afinfo->seq_ops.start = tcp_seq_start;
2230 afinfo->seq_ops.next = tcp_seq_next;
2231 afinfo->seq_ops.stop = tcp_seq_stop;
2232
2233 p = proc_create_data(afinfo->name, 0444, net->proc_net,
2234 afinfo->seq_fops, afinfo);
2235 if (!p)
2236 rc = -ENOMEM;
2237 return rc;
2238}
2239EXPORT_SYMBOL(tcp_proc_register);
2240
2241void tcp_proc_unregister(struct net *net, struct tcp_seq_afinfo *afinfo)
2242{
2243 remove_proc_entry(afinfo->name, net->proc_net);
2244}
2245EXPORT_SYMBOL(tcp_proc_unregister);
2246
2247static void get_openreq4(const struct request_sock *req,
2248 struct seq_file *f, int i)
2249{
2250 const struct inet_request_sock *ireq = inet_rsk(req);
2251 long delta = req->rsk_timer.expires - jiffies;
2252
2253 seq_printf(f, "%4d: %08X:%04X %08X:%04X"
2254 " %02X %08X:%08X %02X:%08lX %08X %5u %8d %u %d %pK",
2255 i,
2256 ireq->ir_loc_addr,
2257 ireq->ir_num,
2258 ireq->ir_rmt_addr,
2259 ntohs(ireq->ir_rmt_port),
2260 TCP_SYN_RECV,
2261 0, 0, /* could print option size, but that is af dependent. */
2262 1, /* timers active (only the expire timer) */
2263 jiffies_delta_to_clock_t(delta),
2264 req->num_timeout,
2265 from_kuid_munged(seq_user_ns(f),
2266 sock_i_uid(req->rsk_listener)),
2267 0, /* non standard timer */
2268 0, /* open_requests have no inode */
2269 0,
2270 req);
2271}
2272
2273static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i)
2274{
2275 int timer_active;
2276 unsigned long timer_expires;
2277 const struct tcp_sock *tp = tcp_sk(sk);
2278 const struct inet_connection_sock *icsk = inet_csk(sk);
2279 const struct inet_sock *inet = inet_sk(sk);
2280 const struct fastopen_queue *fastopenq = &icsk->icsk_accept_queue.fastopenq;
2281 __be32 dest = inet->inet_daddr;
2282 __be32 src = inet->inet_rcv_saddr;
2283 __u16 destp = ntohs(inet->inet_dport);
2284 __u16 srcp = ntohs(inet->inet_sport);
2285 int rx_queue;
2286 int state;
2287
2288 if (icsk->icsk_pending == ICSK_TIME_RETRANS ||
2289 icsk->icsk_pending == ICSK_TIME_REO_TIMEOUT ||
2290 icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) {
2291 timer_active = 1;
2292 timer_expires = icsk->icsk_timeout;
2293 } else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
2294 timer_active = 4;
2295 timer_expires = icsk->icsk_timeout;
2296 } else if (timer_pending(&sk->sk_timer)) {
2297 timer_active = 2;
2298 timer_expires = sk->sk_timer.expires;
2299 } else {
2300 timer_active = 0;
2301 timer_expires = jiffies;
2302 }
2303
2304 state = inet_sk_state_load(sk);
2305 if (state == TCP_LISTEN)
2306 rx_queue = sk->sk_ack_backlog;
2307 else
2308 /* Because we don't lock the socket,
2309 * we might find a transient negative value.
2310 */
2311 rx_queue = max_t(int, tp->rcv_nxt - tp->copied_seq, 0);
2312
2313 seq_printf(f, "%4d: %08X:%04X %08X:%04X %02X %08X:%08X %02X:%08lX "
2314 "%08X %5u %8d %lu %d %pK %lu %lu %u %u %d",
2315 i, src, srcp, dest, destp, state,
2316 tp->write_seq - tp->snd_una,
2317 rx_queue,
2318 timer_active,
2319 jiffies_delta_to_clock_t(timer_expires - jiffies),
2320 icsk->icsk_retransmits,
2321 from_kuid_munged(seq_user_ns(f), sock_i_uid(sk)),
2322 icsk->icsk_probes_out,
2323 sock_i_ino(sk),
2324 refcount_read(&sk->sk_refcnt), sk,
2325 jiffies_to_clock_t(icsk->icsk_rto),
2326 jiffies_to_clock_t(icsk->icsk_ack.ato),
2327 (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
2328 tp->snd_cwnd,
2329 state == TCP_LISTEN ?
2330 fastopenq->max_qlen :
2331 (tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh));
2332}
2333
2334static void get_timewait4_sock(const struct inet_timewait_sock *tw,
2335 struct seq_file *f, int i)
2336{
2337 long delta = tw->tw_timer.expires - jiffies;
2338 __be32 dest, src;
2339 __u16 destp, srcp;
2340
2341 dest = tw->tw_daddr;
2342 src = tw->tw_rcv_saddr;
2343 destp = ntohs(tw->tw_dport);
2344 srcp = ntohs(tw->tw_sport);
2345
2346 seq_printf(f, "%4d: %08X:%04X %08X:%04X"
2347 " %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK",
2348 i, src, srcp, dest, destp, tw->tw_substate, 0, 0,
2349 3, jiffies_delta_to_clock_t(delta), 0, 0, 0, 0,
2350 refcount_read(&tw->tw_refcnt), tw);
2351}
2352
2353#define TMPSZ 150
2354
2355static int tcp4_seq_show(struct seq_file *seq, void *v)
2356{
2357 struct tcp_iter_state *st;
2358 struct sock *sk = v;
2359
2360 seq_setwidth(seq, TMPSZ - 1);
2361 if (v == SEQ_START_TOKEN) {
2362 seq_puts(seq, " sl local_address rem_address st tx_queue "
2363 "rx_queue tr tm->when retrnsmt uid timeout "
2364 "inode");
2365 goto out;
2366 }
2367 st = seq->private;
2368
2369 if (sk->sk_state == TCP_TIME_WAIT)
2370 get_timewait4_sock(v, seq, st->num);
2371 else if (sk->sk_state == TCP_NEW_SYN_RECV)
2372 get_openreq4(v, seq, st->num);
2373 else
2374 get_tcp4_sock(v, seq, st->num);
2375out:
2376 seq_pad(seq, '\n');
2377 return 0;
2378}
2379
2380static const struct file_operations tcp_afinfo_seq_fops = {
2381 .open = tcp_seq_open,
2382 .read = seq_read,
2383 .llseek = seq_lseek,
2384 .release = seq_release_net
2385};
2386
2387static struct tcp_seq_afinfo tcp4_seq_afinfo = {
2388 .name = "tcp",
2389 .family = AF_INET,
2390 .seq_fops = &tcp_afinfo_seq_fops,
2391 .seq_ops = {
2392 .show = tcp4_seq_show,
2393 },
2394};
2395
2396static int __net_init tcp4_proc_init_net(struct net *net)
2397{
2398 return tcp_proc_register(net, &tcp4_seq_afinfo);
2399}
2400
2401static void __net_exit tcp4_proc_exit_net(struct net *net)
2402{
2403 tcp_proc_unregister(net, &tcp4_seq_afinfo);
2404}
2405
2406static struct pernet_operations tcp4_net_ops = {
2407 .init = tcp4_proc_init_net,
2408 .exit = tcp4_proc_exit_net,
2409};
2410
2411int __init tcp4_proc_init(void)
2412{
2413 return register_pernet_subsys(&tcp4_net_ops);
2414}
2415
2416void tcp4_proc_exit(void)
2417{
2418 unregister_pernet_subsys(&tcp4_net_ops);
2419}
2420#endif /* CONFIG_PROC_FS */
2421
2422struct proto tcp_prot = {
2423 .name = "TCP",
2424 .owner = THIS_MODULE,
2425 .close = tcp_close,
2426 .pre_connect = tcp_v4_pre_connect,
2427 .connect = tcp_v4_connect,
2428 .disconnect = tcp_disconnect,
2429 .accept = inet_csk_accept,
2430 .ioctl = tcp_ioctl,
2431 .init = tcp_v4_init_sock,
2432 .destroy = tcp_v4_destroy_sock,
2433 .shutdown = tcp_shutdown,
2434 .setsockopt = tcp_setsockopt,
2435 .getsockopt = tcp_getsockopt,
2436 .keepalive = tcp_set_keepalive,
2437 .recvmsg = tcp_recvmsg,
2438 .sendmsg = tcp_sendmsg,
2439 .sendpage = tcp_sendpage,
2440 .backlog_rcv = tcp_v4_do_rcv,
2441 .release_cb = tcp_release_cb,
2442 .hash = inet_hash,
2443 .unhash = inet_unhash,
2444 .get_port = inet_csk_get_port,
2445 .enter_memory_pressure = tcp_enter_memory_pressure,
2446 .leave_memory_pressure = tcp_leave_memory_pressure,
2447 .stream_memory_free = tcp_stream_memory_free,
2448 .sockets_allocated = &tcp_sockets_allocated,
2449 .orphan_count = &tcp_orphan_count,
2450 .memory_allocated = &tcp_memory_allocated,
2451 .memory_pressure = &tcp_memory_pressure,
2452 .sysctl_mem = sysctl_tcp_mem,
2453 .sysctl_wmem_offset = offsetof(struct net, ipv4.sysctl_tcp_wmem),
2454 .sysctl_rmem_offset = offsetof(struct net, ipv4.sysctl_tcp_rmem),
2455 .max_header = MAX_TCP_HEADER,
2456 .obj_size = sizeof(struct tcp_sock),
2457 .slab_flags = SLAB_TYPESAFE_BY_RCU,
2458 .twsk_prot = &tcp_timewait_sock_ops,
2459 .rsk_prot = &tcp_request_sock_ops,
2460 .h.hashinfo = &tcp_hashinfo,
2461 .no_autobind = true,
2462#ifdef CONFIG_COMPAT
2463 .compat_setsockopt = compat_tcp_setsockopt,
2464 .compat_getsockopt = compat_tcp_getsockopt,
2465#endif
2466 .diag_destroy = tcp_abort,
2467};
2468EXPORT_SYMBOL(tcp_prot);
2469
2470static void __net_exit tcp_sk_exit(struct net *net)
2471{
2472 int cpu;
2473
2474 module_put(net->ipv4.tcp_congestion_control->owner);
2475
2476 for_each_possible_cpu(cpu)
2477 inet_ctl_sock_destroy(*per_cpu_ptr(net->ipv4.tcp_sk, cpu));
2478 free_percpu(net->ipv4.tcp_sk);
2479}
2480
2481static int __net_init tcp_sk_init(struct net *net)
2482{
2483 int res, cpu, cnt;
2484
2485 net->ipv4.tcp_sk = alloc_percpu(struct sock *);
2486 if (!net->ipv4.tcp_sk)
2487 return -ENOMEM;
2488
2489 for_each_possible_cpu(cpu) {
2490 struct sock *sk;
2491
2492 res = inet_ctl_sock_create(&sk, PF_INET, SOCK_RAW,
2493 IPPROTO_TCP, net);
2494 if (res)
2495 goto fail;
2496 sock_set_flag(sk, SOCK_USE_WRITE_QUEUE);
2497 *per_cpu_ptr(net->ipv4.tcp_sk, cpu) = sk;
2498 }
2499
2500 net->ipv4.sysctl_tcp_ecn = 2;
2501 net->ipv4.sysctl_tcp_ecn_fallback = 1;
2502
2503 net->ipv4.sysctl_tcp_base_mss = TCP_BASE_MSS;
2504 net->ipv4.sysctl_tcp_probe_threshold = TCP_PROBE_THRESHOLD;
2505 net->ipv4.sysctl_tcp_probe_interval = TCP_PROBE_INTERVAL;
2506
2507 net->ipv4.sysctl_tcp_keepalive_time = TCP_KEEPALIVE_TIME;
2508 net->ipv4.sysctl_tcp_keepalive_probes = TCP_KEEPALIVE_PROBES;
2509 net->ipv4.sysctl_tcp_keepalive_intvl = TCP_KEEPALIVE_INTVL;
2510
2511 net->ipv4.sysctl_tcp_syn_retries = TCP_SYN_RETRIES;
2512 net->ipv4.sysctl_tcp_synack_retries = TCP_SYNACK_RETRIES;
2513 net->ipv4.sysctl_tcp_syncookies = 1;
2514 net->ipv4.sysctl_tcp_reordering = TCP_FASTRETRANS_THRESH;
2515 net->ipv4.sysctl_tcp_retries1 = TCP_RETR1;
2516 net->ipv4.sysctl_tcp_retries2 = TCP_RETR2;
2517 net->ipv4.sysctl_tcp_orphan_retries = 0;
2518 net->ipv4.sysctl_tcp_fin_timeout = TCP_FIN_TIMEOUT;
2519 net->ipv4.sysctl_tcp_notsent_lowat = UINT_MAX;
2520 net->ipv4.sysctl_tcp_tw_reuse = 0;
2521
2522 cnt = tcp_hashinfo.ehash_mask + 1;
2523 net->ipv4.tcp_death_row.sysctl_max_tw_buckets = (cnt + 1) / 2;
2524 net->ipv4.tcp_death_row.hashinfo = &tcp_hashinfo;
2525
2526 net->ipv4.sysctl_max_syn_backlog = max(128, cnt / 256);
2527 net->ipv4.sysctl_tcp_sack = 1;
2528 net->ipv4.sysctl_tcp_window_scaling = 1;
2529 net->ipv4.sysctl_tcp_timestamps = 1;
2530 net->ipv4.sysctl_tcp_early_retrans = 3;
2531 net->ipv4.sysctl_tcp_recovery = TCP_RACK_LOSS_DETECTION;
2532 net->ipv4.sysctl_tcp_slow_start_after_idle = 1; /* By default, RFC2861 behavior. */
2533 net->ipv4.sysctl_tcp_retrans_collapse = 1;
2534 net->ipv4.sysctl_tcp_max_reordering = 300;
2535 net->ipv4.sysctl_tcp_dsack = 1;
2536 net->ipv4.sysctl_tcp_app_win = 31;
2537 net->ipv4.sysctl_tcp_adv_win_scale = 1;
2538 net->ipv4.sysctl_tcp_frto = 2;
2539 net->ipv4.sysctl_tcp_moderate_rcvbuf = 1;
2540 /* This limits the percentage of the congestion window which we
2541 * will allow a single TSO frame to consume. Building TSO frames
2542 * which are too large can cause TCP streams to be bursty.
2543 */
2544 net->ipv4.sysctl_tcp_tso_win_divisor = 3;
2545 /* Default TSQ limit of four TSO segments */
2546 net->ipv4.sysctl_tcp_limit_output_bytes = 262144;
2547 /* rfc5961 challenge ack rate limiting */
2548 net->ipv4.sysctl_tcp_challenge_ack_limit = 1000;
2549 net->ipv4.sysctl_tcp_min_tso_segs = 2;
2550 net->ipv4.sysctl_tcp_min_rtt_wlen = 300;
2551 net->ipv4.sysctl_tcp_autocorking = 1;
2552 net->ipv4.sysctl_tcp_invalid_ratelimit = HZ/2;
2553 net->ipv4.sysctl_tcp_pacing_ss_ratio = 200;
2554 net->ipv4.sysctl_tcp_pacing_ca_ratio = 120;
2555 if (net != &init_net) {
2556 memcpy(net->ipv4.sysctl_tcp_rmem,
2557 init_net.ipv4.sysctl_tcp_rmem,
2558 sizeof(init_net.ipv4.sysctl_tcp_rmem));
2559 memcpy(net->ipv4.sysctl_tcp_wmem,
2560 init_net.ipv4.sysctl_tcp_wmem,
2561 sizeof(init_net.ipv4.sysctl_tcp_wmem));
2562 }
2563 net->ipv4.sysctl_tcp_fastopen = TFO_CLIENT_ENABLE;
2564 spin_lock_init(&net->ipv4.tcp_fastopen_ctx_lock);
2565 net->ipv4.sysctl_tcp_fastopen_blackhole_timeout = 60 * 60;
2566 atomic_set(&net->ipv4.tfo_active_disable_times, 0);
2567
2568 /* Reno is always built in */
2569 if (!net_eq(net, &init_net) &&
2570 try_module_get(init_net.ipv4.tcp_congestion_control->owner))
2571 net->ipv4.tcp_congestion_control = init_net.ipv4.tcp_congestion_control;
2572 else
2573 net->ipv4.tcp_congestion_control = &tcp_reno;
2574
2575 return 0;
2576fail:
2577 tcp_sk_exit(net);
2578
2579 return res;
2580}
2581
2582static void __net_exit tcp_sk_exit_batch(struct list_head *net_exit_list)
2583{
2584 struct net *net;
2585
2586 inet_twsk_purge(&tcp_hashinfo, AF_INET);
2587
2588 list_for_each_entry(net, net_exit_list, exit_list)
2589 tcp_fastopen_ctx_destroy(net);
2590}
2591
2592static struct pernet_operations __net_initdata tcp_sk_ops = {
2593 .init = tcp_sk_init,
2594 .exit = tcp_sk_exit,
2595 .exit_batch = tcp_sk_exit_batch,
2596};
2597
2598void __init tcp_v4_init(void)
2599{
2600 if (register_pernet_subsys(&tcp_sk_ops))
2601 panic("Failed to create the TCP control socket.\n");
2602}