1/*
2 * INET An implementation of the TCP/IP protocol suite for the LINUX
3 * operating system. INET is implemented using the BSD Socket
4 * interface as the means of communication with the user level.
5 *
6 * Implementation of the Transmission Control Protocol(TCP).
7 *
8 * Authors: Ross Biro
9 * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
10 * Mark Evans, <evansmp@uhura.aston.ac.uk>
11 * Corey Minyard <wf-rch!minyard@relay.EU.net>
12 * Florian La Roche, <flla@stud.uni-sb.de>
13 * Charles Hedrick, <hedrick@klinzhai.rutgers.edu>
14 * Linus Torvalds, <torvalds@cs.helsinki.fi>
15 * Alan Cox, <gw4pts@gw4pts.ampr.org>
16 * Matthew Dillon, <dillon@apollo.west.oic.com>
17 * Arnt Gulbrandsen, <agulbra@nvg.unit.no>
18 * Jorge Cwik, <jorge@laser.satlink.net>
19 *
20 * Fixes:
21 * Alan Cox : Numerous verify_area() calls
22 * Alan Cox : Set the ACK bit on a reset
23 * Alan Cox : Stopped it crashing if it closed while
24 * sk->inuse=1 and was trying to connect
25 * (tcp_err()).
26 * Alan Cox : All icmp error handling was broken
27 * pointers passed where wrong and the
28 * socket was looked up backwards. Nobody
29 * tested any icmp error code obviously.
30 * Alan Cox : tcp_err() now handled properly. It
31 * wakes people on errors. poll
32 * behaves and the icmp error race
33 * has gone by moving it into sock.c
34 * Alan Cox : tcp_send_reset() fixed to work for
35 * everything not just packets for
36 * unknown sockets.
37 * Alan Cox : tcp option processing.
38 * Alan Cox : Reset tweaked (still not 100%) [Had
39 * syn rule wrong]
40 * Herp Rosmanith : More reset fixes
41 * Alan Cox : No longer acks invalid rst frames.
42 * Acking any kind of RST is right out.
43 * Alan Cox : Sets an ignore me flag on an rst
44 * receive otherwise odd bits of prattle
45 * escape still
46 * Alan Cox : Fixed another acking RST frame bug.
47 * Should stop LAN workplace lockups.
48 * Alan Cox : Some tidyups using the new skb list
49 * facilities
50 * Alan Cox : sk->keepopen now seems to work
51 * Alan Cox : Pulls options out correctly on accepts
52 * Alan Cox : Fixed assorted sk->rqueue->next errors
53 * Alan Cox : PSH doesn't end a TCP read. Switched a
54 * bit to skb ops.
55 * Alan Cox : Tidied tcp_data to avoid a potential
56 * nasty.
57 * Alan Cox : Added some better commenting, as the
58 * tcp is hard to follow
59 * Alan Cox : Removed incorrect check for 20 * psh
60 * Michael O'Reilly : ack < copied bug fix.
61 * Johannes Stille : Misc tcp fixes (not all in yet).
62 * Alan Cox : FIN with no memory -> CRASH
63 * Alan Cox : Added socket option proto entries.
64 * Also added awareness of them to accept.
65 * Alan Cox : Added TCP options (SOL_TCP)
66 * Alan Cox : Switched wakeup calls to callbacks,
67 * so the kernel can layer network
68 * sockets.
69 * Alan Cox : Use ip_tos/ip_ttl settings.
70 * Alan Cox : Handle FIN (more) properly (we hope).
71 * Alan Cox : RST frames sent on unsynchronised
72 * state ack error.
73 * Alan Cox : Put in missing check for SYN bit.
74 * Alan Cox : Added tcp_select_window() aka NET2E
75 * window non shrink trick.
76 * Alan Cox : Added a couple of small NET2E timer
77 * fixes
78 * Charles Hedrick : TCP fixes
79 * Toomas Tamm : TCP window fixes
80 * Alan Cox : Small URG fix to rlogin ^C ack fight
81 * Charles Hedrick : Rewrote most of it to actually work
82 * Linus : Rewrote tcp_read() and URG handling
83 * completely
84 * Gerhard Koerting: Fixed some missing timer handling
85 * Matthew Dillon : Reworked TCP machine states as per RFC
86 * Gerhard Koerting: PC/TCP workarounds
87 * Adam Caldwell : Assorted timer/timing errors
88 * Matthew Dillon : Fixed another RST bug
89 * Alan Cox : Move to kernel side addressing changes.
90 * Alan Cox : Beginning work on TCP fastpathing
91 * (not yet usable)
92 * Arnt Gulbrandsen: Turbocharged tcp_check() routine.
93 * Alan Cox : TCP fast path debugging
94 * Alan Cox : Window clamping
95 * Michael Riepe : Bug in tcp_check()
96 * Matt Dillon : More TCP improvements and RST bug fixes
 * Matt Dillon : Yet more small nasties removed from the
98 * TCP code (Be very nice to this man if
99 * tcp finally works 100%) 8)
100 * Alan Cox : BSD accept semantics.
101 * Alan Cox : Reset on closedown bug.
102 * Peter De Schrijver : ENOTCONN check missing in tcp_sendto().
103 * Michael Pall : Handle poll() after URG properly in
104 * all cases.
105 * Michael Pall : Undo the last fix in tcp_read_urg()
106 * (multi URG PUSH broke rlogin).
107 * Michael Pall : Fix the multi URG PUSH problem in
108 * tcp_readable(), poll() after URG
109 * works now.
110 * Michael Pall : recv(...,MSG_OOB) never blocks in the
111 * BSD api.
112 * Alan Cox : Changed the semantics of sk->socket to
113 * fix a race and a signal problem with
114 * accept() and async I/O.
115 * Alan Cox : Relaxed the rules on tcp_sendto().
116 * Yury Shevchuk : Really fixed accept() blocking problem.
117 * Craig I. Hagan : Allow for BSD compatible TIME_WAIT for
118 * clients/servers which listen in on
119 * fixed ports.
120 * Alan Cox : Cleaned the above up and shrank it to
121 * a sensible code size.
122 * Alan Cox : Self connect lockup fix.
123 * Alan Cox : No connect to multicast.
124 * Ross Biro : Close unaccepted children on master
125 * socket close.
126 * Alan Cox : Reset tracing code.
127 * Alan Cox : Spurious resets on shutdown.
128 * Alan Cox : Giant 15 minute/60 second timer error
129 * Alan Cox : Small whoops in polling before an
130 * accept.
131 * Alan Cox : Kept the state trace facility since
132 * it's handy for debugging.
133 * Alan Cox : More reset handler fixes.
134 * Alan Cox : Started rewriting the code based on
135 * the RFC's for other useful protocol
136 * references see: Comer, KA9Q NOS, and
137 * for a reference on the difference
138 * between specifications and how BSD
139 * works see the 4.4lite source.
140 * A.N.Kuznetsov : Don't time wait on completion of tidy
141 * close.
142 * Linus Torvalds : Fin/Shutdown & copied_seq changes.
143 * Linus Torvalds : Fixed BSD port reuse to work first syn
144 * Alan Cox : Reimplemented timers as per the RFC
145 * and using multiple timers for sanity.
146 * Alan Cox : Small bug fixes, and a lot of new
147 * comments.
148 * Alan Cox : Fixed dual reader crash by locking
149 * the buffers (much like datagram.c)
150 * Alan Cox : Fixed stuck sockets in probe. A probe
151 * now gets fed up of retrying without
152 * (even a no space) answer.
153 * Alan Cox : Extracted closing code better
154 * Alan Cox : Fixed the closing state machine to
155 * resemble the RFC.
156 * Alan Cox : More 'per spec' fixes.
157 * Jorge Cwik : Even faster checksumming.
158 * Alan Cox : tcp_data() doesn't ack illegal PSH
159 * only frames. At least one pc tcp stack
160 * generates them.
161 * Alan Cox : Cache last socket.
162 * Alan Cox : Per route irtt.
163 * Matt Day : poll()->select() match BSD precisely on error
164 * Alan Cox : New buffers
165 * Marc Tamsky : Various sk->prot->retransmits and
166 * sk->retransmits misupdating fixed.
167 * Fixed tcp_write_timeout: stuck close,
168 * and TCP syn retries gets used now.
169 * Mark Yarvis : In tcp_read_wakeup(), don't send an
170 * ack if state is TCP_CLOSED.
171 * Alan Cox : Look up device on a retransmit - routes may
172 * change. Doesn't yet cope with MSS shrink right
173 * but it's a start!
174 * Marc Tamsky : Closing in closing fixes.
175 * Mike Shaver : RFC1122 verifications.
176 * Alan Cox : rcv_saddr errors.
177 * Alan Cox : Block double connect().
178 * Alan Cox : Small hooks for enSKIP.
179 * Alexey Kuznetsov: Path MTU discovery.
180 * Alan Cox : Support soft errors.
181 * Alan Cox : Fix MTU discovery pathological case
182 * when the remote claims no mtu!
183 * Marc Tamsky : TCP_CLOSE fix.
184 * Colin (G3TNE) : Send a reset on syn ack replies in
185 * window but wrong (fixes NT lpd problems)
186 * Pedro Roque : Better TCP window handling, delayed ack.
187 * Joerg Reuter : No modification of locked buffers in
188 * tcp_do_retransmit()
189 * Eric Schenk : Changed receiver side silly window
190 * avoidance algorithm to BSD style
191 * algorithm. This doubles throughput
192 * against machines running Solaris,
193 * and seems to result in general
194 * improvement.
195 * Stefan Magdalinski : adjusted tcp_readable() to fix FIONREAD
196 * Willy Konynenberg : Transparent proxying support.
197 * Mike McLagan : Routing by source
198 * Keith Owens : Do proper merging with partial SKB's in
199 * tcp_do_sendmsg to avoid burstiness.
200 * Eric Schenk : Fix fast close down bug with
201 * shutdown() followed by close().
202 * Andi Kleen : Make poll agree with SIGIO
203 * Salvatore Sanfilippo : Support SO_LINGER with linger == 1 and
204 * lingertime == 0 (RFC 793 ABORT Call)
205 * Hirokazu Takahashi : Use copy_from_user() instead of
206 * csum_and_copy_from_user() if possible.
207 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * Description of States:
 *
 *	TCP_SYN_SENT		sent a connection request, waiting for ack
 *
 *	TCP_SYN_RECV		received a connection request, sent ack,
 *				waiting for final ack in three-way handshake.
 *
 *	TCP_ESTABLISHED		connection established
 *
 *	TCP_FIN_WAIT1		our side has shutdown, waiting to complete
 *				transmission of remaining buffered data
 *
 *	TCP_FIN_WAIT2		all buffered data sent, waiting for remote
 *				to shutdown
 *
 *	TCP_CLOSING		both sides have shutdown but we still have
 *				data we have to finish sending
 *
 *	TCP_TIME_WAIT		timeout to catch resent junk before entering
 *				closed, can only be entered from FIN_WAIT2
 *				or CLOSING. Required because the other end
 *				may not have gotten our last ACK causing it
 *				to retransmit the data packet (which we ignore)
 *
 *	TCP_CLOSE_WAIT		remote side has shutdown and is waiting for
 *				us to finish writing our data and to shutdown
 *				(we have to close() to move on to LAST_ACK)
 *
 *	TCP_LAST_ACK		our side has shutdown after remote has
 *				shutdown. There may still be data in our
 *				buffer that we have to finish sending
 *
 *	TCP_CLOSE		socket is finished
 */
247
248#define pr_fmt(fmt) "TCP: " fmt
249
250#include <linux/kernel.h>
251#include <linux/module.h>
252#include <linux/types.h>
253#include <linux/fcntl.h>
254#include <linux/poll.h>
255#include <linux/init.h>
256#include <linux/fs.h>
257#include <linux/skbuff.h>
258#include <linux/scatterlist.h>
259#include <linux/splice.h>
260#include <linux/net.h>
261#include <linux/socket.h>
262#include <linux/random.h>
263#include <linux/bootmem.h>
264#include <linux/highmem.h>
265#include <linux/swap.h>
266#include <linux/cache.h>
267#include <linux/err.h>
268#include <linux/crypto.h>
269#include <linux/time.h>
270#include <linux/slab.h>
271
272#include <net/icmp.h>
273#include <net/inet_common.h>
274#include <net/tcp.h>
275#include <net/xfrm.h>
276#include <net/ip.h>
277#include <net/netdma.h>
278#include <net/sock.h>
279
280#include <asm/uaccess.h>
281#include <asm/ioctls.h>
282#include <net/busy_poll.h>
283
284int sysctl_tcp_fin_timeout __read_mostly = TCP_FIN_TIMEOUT;
285
286int sysctl_tcp_min_tso_segs __read_mostly = 2;
287
288int sysctl_tcp_autocorking __read_mostly = 1;
289
290struct percpu_counter tcp_orphan_count;
291EXPORT_SYMBOL_GPL(tcp_orphan_count);
292
293long sysctl_tcp_mem[3] __read_mostly;
294int sysctl_tcp_wmem[3] __read_mostly;
295int sysctl_tcp_rmem[3] __read_mostly;
296
297EXPORT_SYMBOL(sysctl_tcp_mem);
298EXPORT_SYMBOL(sysctl_tcp_rmem);
299EXPORT_SYMBOL(sysctl_tcp_wmem);
300
301atomic_long_t tcp_memory_allocated; /* Current allocated memory. */
302EXPORT_SYMBOL(tcp_memory_allocated);
303
304/*
305 * Current number of TCP sockets.
306 */
307struct percpu_counter tcp_sockets_allocated;
308EXPORT_SYMBOL(tcp_sockets_allocated);
309
310/*
311 * TCP splice context
312 */
313struct tcp_splice_state {
314 struct pipe_inode_info *pipe;
315 size_t len;
316 unsigned int flags;
317};
318
/*
 * Pressure flag: try to collapse.
 * Technical note: it is used by multiple contexts non-atomically.
 * All of __sk_mem_schedule() is of this nature: accounting
 * is strict, actions are advisory and have some latency.
 */
325int tcp_memory_pressure __read_mostly;
326EXPORT_SYMBOL(tcp_memory_pressure);
327
328void tcp_enter_memory_pressure(struct sock *sk)
329{
330 if (!tcp_memory_pressure) {
331 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMEMORYPRESSURES);
332 tcp_memory_pressure = 1;
333 }
334}
335EXPORT_SYMBOL(tcp_enter_memory_pressure);
336
337/* Convert seconds to retransmits based on initial and max timeout */
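/* For example, with a 1 second initial timeout and a large rto_max,
 * successive retransmission periods accumulate as 1+2+4+8... seconds,
 * so seconds=10 maps to 4 retransmits (1+2+4+8 >= 10).
 */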
338static u8 secs_to_retrans(int seconds, int timeout, int rto_max)
339{
340 u8 res = 0;
341
342 if (seconds > 0) {
343 int period = timeout;
344
345 res = 1;
346 while (seconds > period && res < 255) {
347 res++;
348 timeout <<= 1;
349 if (timeout > rto_max)
350 timeout = rto_max;
351 period += timeout;
352 }
353 }
354 return res;
355}
356
357/* Convert retransmits to seconds based on initial and max timeout */
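/* For example, retrans=4 with a 1 second initial timeout and a large
 * rto_max yields 1+2+4+8 = 15 seconds (each doubling is capped at rto_max).
 */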
358static int retrans_to_secs(u8 retrans, int timeout, int rto_max)
359{
360 int period = 0;
361
362 if (retrans > 0) {
363 period = timeout;
364 while (--retrans) {
365 timeout <<= 1;
366 if (timeout > rto_max)
367 timeout = rto_max;
368 period += timeout;
369 }
370 }
371 return period;
372}
373
/* Address-family independent initialization for a tcp_sock.
 *
 * NOTE: A lot of things are set to zero explicitly by the call to
 * sk_alloc(), so they need not be done here.
 */
379void tcp_init_sock(struct sock *sk)
380{
381 struct inet_connection_sock *icsk = inet_csk(sk);
382 struct tcp_sock *tp = tcp_sk(sk);
383
384 __skb_queue_head_init(&tp->out_of_order_queue);
385 tcp_init_xmit_timers(sk);
386 tcp_prequeue_init(tp);
387 INIT_LIST_HEAD(&tp->tsq_node);
388
389 icsk->icsk_rto = TCP_TIMEOUT_INIT;
390 tp->mdev_us = jiffies_to_usecs(TCP_TIMEOUT_INIT);
391
392 /* So many TCP implementations out there (incorrectly) count the
393 * initial SYN frame in their delayed-ACK and congestion control
394 * algorithms that we must have the following bandaid to talk
395 * efficiently to them. -DaveM
396 */
397 tp->snd_cwnd = TCP_INIT_CWND;
398
399 /* See draft-stevens-tcpca-spec-01 for discussion of the
400 * initialization of these values.
401 */
402 tp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
403 tp->snd_cwnd_clamp = ~0;
404 tp->mss_cache = TCP_MSS_DEFAULT;
405
406 tp->reordering = sysctl_tcp_reordering;
407 tcp_enable_early_retrans(tp);
408 icsk->icsk_ca_ops = &tcp_init_congestion_ops;
409
410 tp->tsoffset = 0;
411
412 sk->sk_state = TCP_CLOSE;
413
414 sk->sk_write_space = sk_stream_write_space;
415 sock_set_flag(sk, SOCK_USE_WRITE_QUEUE);
416
417 icsk->icsk_sync_mss = tcp_sync_mss;
418
419 sk->sk_sndbuf = sysctl_tcp_wmem[1];
420 sk->sk_rcvbuf = sysctl_tcp_rmem[1];
421
422 local_bh_disable();
423 sock_update_memcg(sk);
424 sk_sockets_allocated_inc(sk);
425 local_bh_enable();
426}
427EXPORT_SYMBOL(tcp_init_sock);
428
429/*
430 * Wait for a TCP event.
431 *
432 * Note that we don't need to lock the socket, as the upper poll layers
433 * take care of normal races (between the test and the event) and we don't
434 * go look at any of the socket buffers directly.
435 */
436unsigned int tcp_poll(struct file *file, struct socket *sock, poll_table *wait)
437{
438 unsigned int mask;
439 struct sock *sk = sock->sk;
440 const struct tcp_sock *tp = tcp_sk(sk);
441
442 sock_rps_record_flow(sk);
443
444 sock_poll_wait(file, sk_sleep(sk), wait);
445 if (sk->sk_state == TCP_LISTEN)
446 return inet_csk_listen_poll(sk);
447
448 /* Socket is not locked. We are protected from async events
449 * by poll logic and correct handling of state changes
450 * made by other threads is impossible in any case.
451 */
452
453 mask = 0;
454
455 /*
456 * POLLHUP is certainly not done right. But poll() doesn't
457 * have a notion of HUP in just one direction, and for a
458 * socket the read side is more interesting.
459 *
460 * Some poll() documentation says that POLLHUP is incompatible
461 * with the POLLOUT/POLLWR flags, so somebody should check this
462 * all. But careful, it tends to be safer to return too many
463 * bits than too few, and you can easily break real applications
464 * if you don't tell them that something has hung up!
465 *
466 * Check-me.
467 *
468 * Check number 1. POLLHUP is _UNMASKABLE_ event (see UNIX98 and
469 * our fs/select.c). It means that after we received EOF,
470 * poll always returns immediately, making impossible poll() on write()
471 * in state CLOSE_WAIT. One solution is evident --- to set POLLHUP
472 * if and only if shutdown has been made in both directions.
473 * Actually, it is interesting to look how Solaris and DUX
474 * solve this dilemma. I would prefer, if POLLHUP were maskable,
475 * then we could set it on SND_SHUTDOWN. BTW examples given
476 * in Stevens' books assume exactly this behaviour, it explains
477 * why POLLHUP is incompatible with POLLOUT. --ANK
478 *
479 * NOTE. Check for TCP_CLOSE is added. The goal is to prevent
480 * blocking on fresh not-connected or disconnected socket. --ANK
481 */
482 if (sk->sk_shutdown == SHUTDOWN_MASK || sk->sk_state == TCP_CLOSE)
483 mask |= POLLHUP;
484 if (sk->sk_shutdown & RCV_SHUTDOWN)
485 mask |= POLLIN | POLLRDNORM | POLLRDHUP;
486
487 /* Connected or passive Fast Open socket? */
488 if (sk->sk_state != TCP_SYN_SENT &&
489 (sk->sk_state != TCP_SYN_RECV || tp->fastopen_rsk != NULL)) {
490 int target = sock_rcvlowat(sk, 0, INT_MAX);
491
492 if (tp->urg_seq == tp->copied_seq &&
493 !sock_flag(sk, SOCK_URGINLINE) &&
494 tp->urg_data)
495 target++;
496
		/* Potential race condition. If the read of tp below is
		 * reordered above the sk->sk_state check, we can be illegally
		 * awakened in SYN_* states. */
500 if (tp->rcv_nxt - tp->copied_seq >= target)
501 mask |= POLLIN | POLLRDNORM;
502
503 if (!(sk->sk_shutdown & SEND_SHUTDOWN)) {
504 if (sk_stream_is_writeable(sk)) {
505 mask |= POLLOUT | POLLWRNORM;
506 } else { /* send SIGIO later */
507 set_bit(SOCK_ASYNC_NOSPACE,
508 &sk->sk_socket->flags);
509 set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
510
511 /* Race breaker. If space is freed after
512 * wspace test but before the flags are set,
513 * IO signal will be lost.
514 */
515 if (sk_stream_is_writeable(sk))
516 mask |= POLLOUT | POLLWRNORM;
517 }
518 } else
519 mask |= POLLOUT | POLLWRNORM;
520
521 if (tp->urg_data & TCP_URG_VALID)
522 mask |= POLLPRI;
523 }
524 /* This barrier is coupled with smp_wmb() in tcp_reset() */
525 smp_rmb();
526 if (sk->sk_err)
527 mask |= POLLERR;
528
529 return mask;
530}
531EXPORT_SYMBOL(tcp_poll);
532
533int tcp_ioctl(struct sock *sk, int cmd, unsigned long arg)
534{
535 struct tcp_sock *tp = tcp_sk(sk);
536 int answ;
537 bool slow;
538
539 switch (cmd) {
540 case SIOCINQ:
541 if (sk->sk_state == TCP_LISTEN)
542 return -EINVAL;
543
544 slow = lock_sock_fast(sk);
545 if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV))
546 answ = 0;
547 else if (sock_flag(sk, SOCK_URGINLINE) ||
548 !tp->urg_data ||
549 before(tp->urg_seq, tp->copied_seq) ||
550 !before(tp->urg_seq, tp->rcv_nxt)) {
551
552 answ = tp->rcv_nxt - tp->copied_seq;
553
554 /* Subtract 1, if FIN was received */
555 if (answ && sock_flag(sk, SOCK_DONE))
556 answ--;
557 } else
558 answ = tp->urg_seq - tp->copied_seq;
559 unlock_sock_fast(sk, slow);
560 break;
561 case SIOCATMARK:
562 answ = tp->urg_data && tp->urg_seq == tp->copied_seq;
563 break;
564 case SIOCOUTQ:
565 if (sk->sk_state == TCP_LISTEN)
566 return -EINVAL;
567
568 if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV))
569 answ = 0;
570 else
571 answ = tp->write_seq - tp->snd_una;
572 break;
573 case SIOCOUTQNSD:
574 if (sk->sk_state == TCP_LISTEN)
575 return -EINVAL;
576
577 if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV))
578 answ = 0;
579 else
580 answ = tp->write_seq - tp->snd_nxt;
581 break;
582 default:
583 return -ENOIOCTLCMD;
584 }
585
586 return put_user(answ, (int __user *)arg);
587}
588EXPORT_SYMBOL(tcp_ioctl);
589
590static inline void tcp_mark_push(struct tcp_sock *tp, struct sk_buff *skb)
591{
592 TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_PSH;
593 tp->pushed_seq = tp->write_seq;
594}
595
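/* Force a push once we have written more than half of the peer's maximum
 * advertised window since the last pushed byte.
 */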
596static inline bool forced_push(const struct tcp_sock *tp)
597{
598 return after(tp->write_seq, tp->pushed_seq + (tp->max_window >> 1));
599}
600
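/* Queue a freshly allocated skb at the tail of the write queue, charge its
 * truesize to the socket and start its sequence range at the current
 * write_seq. A one-shot TCP_NAGLE_PUSH request is consumed here.
 */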
601static inline void skb_entail(struct sock *sk, struct sk_buff *skb)
602{
603 struct tcp_sock *tp = tcp_sk(sk);
604 struct tcp_skb_cb *tcb = TCP_SKB_CB(skb);
605
606 skb->csum = 0;
607 tcb->seq = tcb->end_seq = tp->write_seq;
608 tcb->tcp_flags = TCPHDR_ACK;
609 tcb->sacked = 0;
610 skb_header_release(skb);
611 tcp_add_write_queue_tail(sk, skb);
612 sk->sk_wmem_queued += skb->truesize;
613 sk_mem_charge(sk, skb->truesize);
614 if (tp->nonagle & TCP_NAGLE_PUSH)
615 tp->nonagle &= ~TCP_NAGLE_PUSH;
616}
617
618static inline void tcp_mark_urg(struct tcp_sock *tp, int flags)
619{
620 if (flags & MSG_OOB)
621 tp->snd_up = tp->write_seq;
622}
623
/* If a not-yet-filled skb is pushed, do not send it if
 * we have data packets in Qdisc or NIC queues:
 * because TX completion will happen shortly, we get a chance
 * to coalesce future sendmsg() payload into this skb, without
 * need for a timer, and with no latency trade-off.
 * As packets containing data payload have a bigger truesize
 * than pure ACK (dataless) packets, the last checks prevent
 * autocorking if we only have an ACK in Qdisc/NIC queues,
 * or if TX completion was delayed after we processed the ACK packet.
 */
634static bool tcp_should_autocork(struct sock *sk, struct sk_buff *skb,
635 int size_goal)
636{
637 return skb->len < size_goal &&
638 sysctl_tcp_autocorking &&
639 skb != tcp_write_queue_head(sk) &&
640 atomic_read(&sk->sk_wmem_alloc) > skb->truesize;
641}
642
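/* Push pending data out: mark PSH unless MSG_MORE is set (or a forced push
 * is due), record urgent data, possibly defer transmission via autocorking,
 * and otherwise hand the queue to __tcp_push_pending_frames().
 */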
643static void tcp_push(struct sock *sk, int flags, int mss_now,
644 int nonagle, int size_goal)
645{
646 struct tcp_sock *tp = tcp_sk(sk);
647 struct sk_buff *skb;
648
649 if (!tcp_send_head(sk))
650 return;
651
652 skb = tcp_write_queue_tail(sk);
653 if (!(flags & MSG_MORE) || forced_push(tp))
654 tcp_mark_push(tp, skb);
655
656 tcp_mark_urg(tp, flags);
657
658 if (tcp_should_autocork(sk, skb, size_goal)) {
659
660 /* avoid atomic op if TSQ_THROTTLED bit is already set */
661 if (!test_bit(TSQ_THROTTLED, &tp->tsq_flags)) {
662 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPAUTOCORKING);
663 set_bit(TSQ_THROTTLED, &tp->tsq_flags);
664 }
665 /* It is possible TX completion already happened
666 * before we set TSQ_THROTTLED.
667 */
668 if (atomic_read(&sk->sk_wmem_alloc) > skb->truesize)
669 return;
670 }
671
672 if (flags & MSG_MORE)
673 nonagle = TCP_NAGLE_CORK;
674
675 __tcp_push_pending_frames(sk, mss_now, nonagle);
676}
677
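/* read_descriptor actor for tcp_splice_read(): splice at most
 * min(rd_desc->count, len) bytes of @skb into the destination pipe.
 */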
678static int tcp_splice_data_recv(read_descriptor_t *rd_desc, struct sk_buff *skb,
679 unsigned int offset, size_t len)
680{
681 struct tcp_splice_state *tss = rd_desc->arg.data;
682 int ret;
683
684 ret = skb_splice_bits(skb, offset, tss->pipe, min(rd_desc->count, len),
685 tss->flags);
686 if (ret > 0)
687 rd_desc->count -= ret;
688 return ret;
689}
690
691static int __tcp_splice_read(struct sock *sk, struct tcp_splice_state *tss)
692{
693 /* Store TCP splice context information in read_descriptor_t. */
694 read_descriptor_t rd_desc = {
695 .arg.data = tss,
696 .count = tss->len,
697 };
698
699 return tcp_read_sock(sk, &rd_desc, tcp_splice_data_recv);
700}
701
702/**
703 * tcp_splice_read - splice data from TCP socket to a pipe
704 * @sock: socket to splice from
705 * @ppos: position (not valid)
706 * @pipe: pipe to splice to
707 * @len: number of bytes to splice
708 * @flags: splice modifier flags
709 *
710 * Description:
711 * Will read pages from given socket and fill them into a pipe.
712 *
713 **/
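/*
 * Rough sketch of the user-space pattern this supports (hypothetical
 * descriptors), assuming a connected TCP socket and a pipe:
 *
 *	ssize_t n = splice(tcp_fd, NULL, pipe_wr, NULL, 65536, SPLICE_F_MOVE);
 *
 * The pipe contents can then be spliced onwards without passing through
 * user memory.
 */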
714ssize_t tcp_splice_read(struct socket *sock, loff_t *ppos,
715 struct pipe_inode_info *pipe, size_t len,
716 unsigned int flags)
717{
718 struct sock *sk = sock->sk;
719 struct tcp_splice_state tss = {
720 .pipe = pipe,
721 .len = len,
722 .flags = flags,
723 };
724 long timeo;
725 ssize_t spliced;
726 int ret;
727
728 sock_rps_record_flow(sk);
729 /*
730 * We can't seek on a socket input
731 */
732 if (unlikely(*ppos))
733 return -ESPIPE;
734
735 ret = spliced = 0;
736
737 lock_sock(sk);
738
739 timeo = sock_rcvtimeo(sk, sock->file->f_flags & O_NONBLOCK);
740 while (tss.len) {
741 ret = __tcp_splice_read(sk, &tss);
742 if (ret < 0)
743 break;
744 else if (!ret) {
745 if (spliced)
746 break;
747 if (sock_flag(sk, SOCK_DONE))
748 break;
749 if (sk->sk_err) {
750 ret = sock_error(sk);
751 break;
752 }
753 if (sk->sk_shutdown & RCV_SHUTDOWN)
754 break;
755 if (sk->sk_state == TCP_CLOSE) {
				/*
				 * This occurs when the user tries to read
				 * from a never-connected socket.
				 */
760 if (!sock_flag(sk, SOCK_DONE))
761 ret = -ENOTCONN;
762 break;
763 }
764 if (!timeo) {
765 ret = -EAGAIN;
766 break;
767 }
768 sk_wait_data(sk, &timeo);
769 if (signal_pending(current)) {
770 ret = sock_intr_errno(timeo);
771 break;
772 }
773 continue;
774 }
775 tss.len -= ret;
776 spliced += ret;
777
778 if (!timeo)
779 break;
780 release_sock(sk);
781 lock_sock(sk);
782
783 if (sk->sk_err || sk->sk_state == TCP_CLOSE ||
784 (sk->sk_shutdown & RCV_SHUTDOWN) ||
785 signal_pending(current))
786 break;
787 }
788
789 release_sock(sk);
790
791 if (spliced)
792 return spliced;
793
794 return ret;
795}
796EXPORT_SYMBOL(tcp_splice_read);
797
798struct sk_buff *sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp)
799{
800 struct sk_buff *skb;
801
802 /* The TCP header must be at least 32-bit aligned. */
803 size = ALIGN(size, 4);
804
805 skb = alloc_skb_fclone(size + sk->sk_prot->max_header, gfp);
806 if (skb) {
807 if (sk_wmem_schedule(sk, skb->truesize)) {
808 skb_reserve(skb, sk->sk_prot->max_header);
809 /*
810 * Make sure that we have exactly size bytes
811 * available to the caller, no more, no less.
812 */
813 skb->reserved_tailroom = skb->end - skb->tail - size;
814 return skb;
815 }
816 __kfree_skb(skb);
817 } else {
818 sk->sk_prot->enter_memory_pressure(sk);
819 sk_stream_moderate_sndbuf(sk);
820 }
821 return NULL;
822}
823
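/* Compute how large an skb we should try to build for this socket.
 * Without GSO this is simply mss_now. With GSO the goal is derived from
 * the pacing rate, floored at sysctl_tcp_min_tso_segs segments, bounded
 * by sk_gso_max_size and half the window, and cached as a whole number
 * of segments in xmit_size_goal_segs to avoid divides on the fast path.
 */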
824static unsigned int tcp_xmit_size_goal(struct sock *sk, u32 mss_now,
825 int large_allowed)
826{
827 struct tcp_sock *tp = tcp_sk(sk);
828 u32 xmit_size_goal, old_size_goal;
829
830 xmit_size_goal = mss_now;
831
832 if (large_allowed && sk_can_gso(sk)) {
833 u32 gso_size, hlen;
834
835 /* Maybe we should/could use sk->sk_prot->max_header here ? */
836 hlen = inet_csk(sk)->icsk_af_ops->net_header_len +
837 inet_csk(sk)->icsk_ext_hdr_len +
838 tp->tcp_header_len;
839
840 /* Goal is to send at least one packet per ms,
841 * not one big TSO packet every 100 ms.
842 * This preserves ACK clocking and is consistent
843 * with tcp_tso_should_defer() heuristic.
844 */
845 gso_size = sk->sk_pacing_rate / (2 * MSEC_PER_SEC);
846 gso_size = max_t(u32, gso_size,
847 sysctl_tcp_min_tso_segs * mss_now);
848
849 xmit_size_goal = min_t(u32, gso_size,
850 sk->sk_gso_max_size - 1 - hlen);
851
852 xmit_size_goal = tcp_bound_to_half_wnd(tp, xmit_size_goal);
853
854 /* We try hard to avoid divides here */
855 old_size_goal = tp->xmit_size_goal_segs * mss_now;
856
857 if (likely(old_size_goal <= xmit_size_goal &&
858 old_size_goal + mss_now > xmit_size_goal)) {
859 xmit_size_goal = old_size_goal;
860 } else {
861 tp->xmit_size_goal_segs =
862 min_t(u16, xmit_size_goal / mss_now,
863 sk->sk_gso_max_segs);
864 xmit_size_goal = tp->xmit_size_goal_segs * mss_now;
865 }
866 }
867
868 return max(xmit_size_goal, mss_now);
869}
870
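/* Return the current MSS to use and, via @size_goal, the preferred skb
 * size for this send (large TSO goals are not used for MSG_OOB data).
 */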
871static int tcp_send_mss(struct sock *sk, int *size_goal, int flags)
872{
873 int mss_now;
874
875 mss_now = tcp_current_mss(sk);
876 *size_goal = tcp_xmit_size_goal(sk, mss_now, !(flags & MSG_OOB));
877
878 return mss_now;
879}
880
881static ssize_t do_tcp_sendpages(struct sock *sk, struct page *page, int offset,
882 size_t size, int flags)
883{
884 struct tcp_sock *tp = tcp_sk(sk);
885 int mss_now, size_goal;
886 int err;
887 ssize_t copied;
888 long timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
889
890 /* Wait for a connection to finish. One exception is TCP Fast Open
891 * (passive side) where data is allowed to be sent before a connection
892 * is fully established.
893 */
894 if (((1 << sk->sk_state) & ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT)) &&
895 !tcp_passive_fastopen(sk)) {
896 if ((err = sk_stream_wait_connect(sk, &timeo)) != 0)
897 goto out_err;
898 }
899
900 clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
901
902 mss_now = tcp_send_mss(sk, &size_goal, flags);
903 copied = 0;
904
905 err = -EPIPE;
906 if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN))
907 goto out_err;
908
909 while (size > 0) {
910 struct sk_buff *skb = tcp_write_queue_tail(sk);
911 int copy, i;
912 bool can_coalesce;
913
914 if (!tcp_send_head(sk) || (copy = size_goal - skb->len) <= 0) {
915new_segment:
916 if (!sk_stream_memory_free(sk))
917 goto wait_for_sndbuf;
918
919 skb = sk_stream_alloc_skb(sk, 0, sk->sk_allocation);
920 if (!skb)
921 goto wait_for_memory;
922
923 skb_entail(sk, skb);
924 copy = size_goal;
925 }
926
927 if (copy > size)
928 copy = size;
929
930 i = skb_shinfo(skb)->nr_frags;
931 can_coalesce = skb_can_coalesce(skb, i, page, offset);
932 if (!can_coalesce && i >= MAX_SKB_FRAGS) {
933 tcp_mark_push(tp, skb);
934 goto new_segment;
935 }
936 if (!sk_wmem_schedule(sk, copy))
937 goto wait_for_memory;
938
939 if (can_coalesce) {
940 skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], copy);
941 } else {
942 get_page(page);
943 skb_fill_page_desc(skb, i, page, offset, copy);
944 }
945 skb_shinfo(skb)->tx_flags |= SKBTX_SHARED_FRAG;
946
947 skb->len += copy;
948 skb->data_len += copy;
949 skb->truesize += copy;
950 sk->sk_wmem_queued += copy;
951 sk_mem_charge(sk, copy);
952 skb->ip_summed = CHECKSUM_PARTIAL;
953 tp->write_seq += copy;
954 TCP_SKB_CB(skb)->end_seq += copy;
955 skb_shinfo(skb)->gso_segs = 0;
956
957 if (!copied)
958 TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_PSH;
959
960 copied += copy;
961 offset += copy;
962 if (!(size -= copy))
963 goto out;
964
965 if (skb->len < size_goal || (flags & MSG_OOB))
966 continue;
967
968 if (forced_push(tp)) {
969 tcp_mark_push(tp, skb);
970 __tcp_push_pending_frames(sk, mss_now, TCP_NAGLE_PUSH);
971 } else if (skb == tcp_send_head(sk))
972 tcp_push_one(sk, mss_now);
973 continue;
974
975wait_for_sndbuf:
976 set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
977wait_for_memory:
978 tcp_push(sk, flags & ~MSG_MORE, mss_now,
979 TCP_NAGLE_PUSH, size_goal);
980
981 if ((err = sk_stream_wait_memory(sk, &timeo)) != 0)
982 goto do_error;
983
984 mss_now = tcp_send_mss(sk, &size_goal, flags);
985 }
986
987out:
988 if (copied && !(flags & MSG_SENDPAGE_NOTLAST))
989 tcp_push(sk, flags, mss_now, tp->nonagle, size_goal);
990 return copied;
991
992do_error:
993 if (copied)
994 goto out;
995out_err:
996 return sk_stream_error(sk, flags, err);
997}
998
999int tcp_sendpage(struct sock *sk, struct page *page, int offset,
1000 size_t size, int flags)
1001{
1002 ssize_t res;
1003
1004 if (!(sk->sk_route_caps & NETIF_F_SG) ||
1005 !(sk->sk_route_caps & NETIF_F_ALL_CSUM))
1006 return sock_no_sendpage(sk->sk_socket, page, offset, size,
1007 flags);
1008
1009 lock_sock(sk);
1010 res = do_tcp_sendpages(sk, page, offset, size, flags);
1011 release_sock(sk);
1012 return res;
1013}
1014EXPORT_SYMBOL(tcp_sendpage);
1015
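/* Decide how much linear buffer space to ask for when tcp_sendmsg()
 * allocates a new skb. With scatter-gather, a GSO socket asks only for the
 * linear room left in a 2048-byte allocation after the TCP headers, and a
 * non-GSO socket is capped so the linear head does not spill past a page;
 * otherwise the full cached MSS is used.
 */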
1016static inline int select_size(const struct sock *sk, bool sg)
1017{
1018 const struct tcp_sock *tp = tcp_sk(sk);
1019 int tmp = tp->mss_cache;
1020
1021 if (sg) {
1022 if (sk_can_gso(sk)) {
			/* Small frames won't use a full page:
			 * payload will immediately follow the TCP header.
			 */
1026 tmp = SKB_WITH_OVERHEAD(2048 - MAX_TCP_HEADER);
1027 } else {
1028 int pgbreak = SKB_MAX_HEAD(MAX_TCP_HEADER);
1029
1030 if (tmp >= pgbreak &&
1031 tmp <= pgbreak + (MAX_SKB_FRAGS - 1) * PAGE_SIZE)
1032 tmp = pgbreak;
1033 }
1034 }
1035
1036 return tmp;
1037}
1038
1039void tcp_free_fastopen_req(struct tcp_sock *tp)
1040{
1041 if (tp->fastopen_req != NULL) {
1042 kfree(tp->fastopen_req);
1043 tp->fastopen_req = NULL;
1044 }
1045}
1046
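/* Client side of TCP Fast Open: remember the msghdr in a fastopen_request
 * so that the data can ride on the SYN sent by __inet_stream_connect();
 * *copied reports how many bytes were actually placed in the SYN.
 */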
1047static int tcp_sendmsg_fastopen(struct sock *sk, struct msghdr *msg,
1048 int *copied, size_t size)
1049{
1050 struct tcp_sock *tp = tcp_sk(sk);
1051 int err, flags;
1052
1053 if (!(sysctl_tcp_fastopen & TFO_CLIENT_ENABLE))
1054 return -EOPNOTSUPP;
1055 if (tp->fastopen_req != NULL)
1056 return -EALREADY; /* Another Fast Open is in progress */
1057
1058 tp->fastopen_req = kzalloc(sizeof(struct tcp_fastopen_request),
1059 sk->sk_allocation);
1060 if (unlikely(tp->fastopen_req == NULL))
1061 return -ENOBUFS;
1062 tp->fastopen_req->data = msg;
1063 tp->fastopen_req->size = size;
1064
1065 flags = (msg->msg_flags & MSG_DONTWAIT) ? O_NONBLOCK : 0;
1066 err = __inet_stream_connect(sk->sk_socket, msg->msg_name,
1067 msg->msg_namelen, flags);
1068 *copied = tp->fastopen_req->copied;
1069 tcp_free_fastopen_req(tp);
1070 return err;
1071}
1072
1073int tcp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
1074 size_t size)
1075{
1076 struct iovec *iov;
1077 struct tcp_sock *tp = tcp_sk(sk);
1078 struct sk_buff *skb;
1079 int iovlen, flags, err, copied = 0;
1080 int mss_now = 0, size_goal, copied_syn = 0, offset = 0;
1081 bool sg;
1082 long timeo;
1083
1084 lock_sock(sk);
1085
1086 flags = msg->msg_flags;
1087 if (flags & MSG_FASTOPEN) {
1088 err = tcp_sendmsg_fastopen(sk, msg, &copied_syn, size);
1089 if (err == -EINPROGRESS && copied_syn > 0)
1090 goto out;
1091 else if (err)
1092 goto out_err;
1093 offset = copied_syn;
1094 }
1095
1096 timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
1097
1098 /* Wait for a connection to finish. One exception is TCP Fast Open
1099 * (passive side) where data is allowed to be sent before a connection
1100 * is fully established.
1101 */
1102 if (((1 << sk->sk_state) & ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT)) &&
1103 !tcp_passive_fastopen(sk)) {
1104 if ((err = sk_stream_wait_connect(sk, &timeo)) != 0)
1105 goto do_error;
1106 }
1107
1108 if (unlikely(tp->repair)) {
1109 if (tp->repair_queue == TCP_RECV_QUEUE) {
1110 copied = tcp_send_rcvq(sk, msg, size);
1111 goto out;
1112 }
1113
1114 err = -EINVAL;
1115 if (tp->repair_queue == TCP_NO_QUEUE)
1116 goto out_err;
1117
1118 /* 'common' sending to sendq */
1119 }
1120
1121 /* This should be in poll */
1122 clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
1123
1124 mss_now = tcp_send_mss(sk, &size_goal, flags);
1125
1126 /* Ok commence sending. */
1127 iovlen = msg->msg_iovlen;
1128 iov = msg->msg_iov;
1129 copied = 0;
1130
1131 err = -EPIPE;
1132 if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN))
1133 goto out_err;
1134
1135 sg = !!(sk->sk_route_caps & NETIF_F_SG);
1136
1137 while (--iovlen >= 0) {
1138 size_t seglen = iov->iov_len;
1139 unsigned char __user *from = iov->iov_base;
1140
1141 iov++;
1142 if (unlikely(offset > 0)) { /* Skip bytes copied in SYN */
1143 if (offset >= seglen) {
1144 offset -= seglen;
1145 continue;
1146 }
1147 seglen -= offset;
1148 from += offset;
1149 offset = 0;
1150 }
1151
1152 while (seglen > 0) {
1153 int copy = 0;
1154 int max = size_goal;
1155
1156 skb = tcp_write_queue_tail(sk);
1157 if (tcp_send_head(sk)) {
1158 if (skb->ip_summed == CHECKSUM_NONE)
1159 max = mss_now;
1160 copy = max - skb->len;
1161 }
1162
1163 if (copy <= 0) {
1164new_segment:
				/* Allocate a new segment. If the interface is SG,
				 * allocate an skb that fits into a single page.
				 */
1168 if (!sk_stream_memory_free(sk))
1169 goto wait_for_sndbuf;
1170
1171 skb = sk_stream_alloc_skb(sk,
1172 select_size(sk, sg),
1173 sk->sk_allocation);
1174 if (!skb)
1175 goto wait_for_memory;
1176
1177 /*
1178 * All packets are restored as if they have
1179 * already been sent.
1180 */
1181 if (tp->repair)
1182 TCP_SKB_CB(skb)->when = tcp_time_stamp;
1183
1184 /*
1185 * Check whether we can use HW checksum.
1186 */
1187 if (sk->sk_route_caps & NETIF_F_ALL_CSUM)
1188 skb->ip_summed = CHECKSUM_PARTIAL;
1189
1190 skb_entail(sk, skb);
1191 copy = size_goal;
1192 max = size_goal;
1193 }
1194
1195 /* Try to append data to the end of skb. */
1196 if (copy > seglen)
1197 copy = seglen;
1198
1199 /* Where to copy to? */
1200 if (skb_availroom(skb) > 0) {
1201 /* We have some space in skb head. Superb! */
1202 copy = min_t(int, copy, skb_availroom(skb));
1203 err = skb_add_data_nocache(sk, skb, from, copy);
1204 if (err)
1205 goto do_fault;
1206 } else {
1207 bool merge = true;
1208 int i = skb_shinfo(skb)->nr_frags;
1209 struct page_frag *pfrag = sk_page_frag(sk);
1210
1211 if (!sk_page_frag_refill(sk, pfrag))
1212 goto wait_for_memory;
1213
1214 if (!skb_can_coalesce(skb, i, pfrag->page,
1215 pfrag->offset)) {
1216 if (i == MAX_SKB_FRAGS || !sg) {
1217 tcp_mark_push(tp, skb);
1218 goto new_segment;
1219 }
1220 merge = false;
1221 }
1222
1223 copy = min_t(int, copy, pfrag->size - pfrag->offset);
1224
1225 if (!sk_wmem_schedule(sk, copy))
1226 goto wait_for_memory;
1227
1228 err = skb_copy_to_page_nocache(sk, from, skb,
1229 pfrag->page,
1230 pfrag->offset,
1231 copy);
1232 if (err)
1233 goto do_error;
1234
1235 /* Update the skb. */
1236 if (merge) {
1237 skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], copy);
1238 } else {
1239 skb_fill_page_desc(skb, i, pfrag->page,
1240 pfrag->offset, copy);
1241 get_page(pfrag->page);
1242 }
1243 pfrag->offset += copy;
1244 }
1245
1246 if (!copied)
1247 TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_PSH;
1248
1249 tp->write_seq += copy;
1250 TCP_SKB_CB(skb)->end_seq += copy;
1251 skb_shinfo(skb)->gso_segs = 0;
1252
1253 from += copy;
1254 copied += copy;
1255 if ((seglen -= copy) == 0 && iovlen == 0)
1256 goto out;
1257
1258 if (skb->len < max || (flags & MSG_OOB) || unlikely(tp->repair))
1259 continue;
1260
1261 if (forced_push(tp)) {
1262 tcp_mark_push(tp, skb);
1263 __tcp_push_pending_frames(sk, mss_now, TCP_NAGLE_PUSH);
1264 } else if (skb == tcp_send_head(sk))
1265 tcp_push_one(sk, mss_now);
1266 continue;
1267
1268wait_for_sndbuf:
1269 set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
1270wait_for_memory:
1271 if (copied)
1272 tcp_push(sk, flags & ~MSG_MORE, mss_now,
1273 TCP_NAGLE_PUSH, size_goal);
1274
1275 if ((err = sk_stream_wait_memory(sk, &timeo)) != 0)
1276 goto do_error;
1277
1278 mss_now = tcp_send_mss(sk, &size_goal, flags);
1279 }
1280 }
1281
1282out:
1283 if (copied)
1284 tcp_push(sk, flags, mss_now, tp->nonagle, size_goal);
1285 release_sock(sk);
1286 return copied + copied_syn;
1287
1288do_fault:
1289 if (!skb->len) {
1290 tcp_unlink_write_queue(skb, sk);
1291 /* It is the one place in all of TCP, except connection
1292 * reset, where we can be unlinking the send_head.
1293 */
1294 tcp_check_send_head(sk, skb);
1295 sk_wmem_free_skb(sk, skb);
1296 }
1297
1298do_error:
1299 if (copied + copied_syn)
1300 goto out;
1301out_err:
1302 err = sk_stream_error(sk, flags, err);
1303 release_sock(sk);
1304 return err;
1305}
1306EXPORT_SYMBOL(tcp_sendmsg);
1307
1308/*
1309 * Handle reading urgent data. BSD has very simple semantics for
1310 * this, no blocking and very strange errors 8)
1311 */
1312
1313static int tcp_recv_urg(struct sock *sk, struct msghdr *msg, int len, int flags)
1314{
1315 struct tcp_sock *tp = tcp_sk(sk);
1316
1317 /* No URG data to read. */
1318 if (sock_flag(sk, SOCK_URGINLINE) || !tp->urg_data ||
1319 tp->urg_data == TCP_URG_READ)
1320 return -EINVAL; /* Yes this is right ! */
1321
1322 if (sk->sk_state == TCP_CLOSE && !sock_flag(sk, SOCK_DONE))
1323 return -ENOTCONN;
1324
1325 if (tp->urg_data & TCP_URG_VALID) {
1326 int err = 0;
1327 char c = tp->urg_data;
1328
1329 if (!(flags & MSG_PEEK))
1330 tp->urg_data = TCP_URG_READ;
1331
1332 /* Read urgent data. */
1333 msg->msg_flags |= MSG_OOB;
1334
1335 if (len > 0) {
1336 if (!(flags & MSG_TRUNC))
1337 err = memcpy_toiovec(msg->msg_iov, &c, 1);
1338 len = 1;
1339 } else
1340 msg->msg_flags |= MSG_TRUNC;
1341
1342 return err ? -EFAULT : len;
1343 }
1344
1345 if (sk->sk_state == TCP_CLOSE || (sk->sk_shutdown & RCV_SHUTDOWN))
1346 return 0;
1347
1348 /* Fixed the recv(..., MSG_OOB) behaviour. BSD docs and
1349 * the available implementations agree in this case:
1350 * this call should never block, independent of the
1351 * blocking state of the socket.
1352 * Mike <pall@rz.uni-karlsruhe.de>
1353 */
1354 return -EAGAIN;
1355}
1356
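/* Peek the contents of the send queue into the user iovec; used for
 * TCP_REPAIR reads of the send queue.
 */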
1357static int tcp_peek_sndq(struct sock *sk, struct msghdr *msg, int len)
1358{
1359 struct sk_buff *skb;
1360 int copied = 0, err = 0;
1361
1362 /* XXX -- need to support SO_PEEK_OFF */
1363
1364 skb_queue_walk(&sk->sk_write_queue, skb) {
1365 err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, skb->len);
1366 if (err)
1367 break;
1368
1369 copied += skb->len;
1370 }
1371
1372 return err ?: copied;
1373}
1374
/* Clean up the receive buffer for full frames taken by the user,
 * then send an ACK if necessary. COPIED is the number of bytes
 * tcp_recvmsg has given to the user so far; it speeds up the
 * calculation of whether or not we must ACK for the sake of
 * a window update.
 */
1381void tcp_cleanup_rbuf(struct sock *sk, int copied)
1382{
1383 struct tcp_sock *tp = tcp_sk(sk);
1384 bool time_to_ack = false;
1385
1386 struct sk_buff *skb = skb_peek(&sk->sk_receive_queue);
1387
1388 WARN(skb && !before(tp->copied_seq, TCP_SKB_CB(skb)->end_seq),
1389 "cleanup rbuf bug: copied %X seq %X rcvnxt %X\n",
1390 tp->copied_seq, TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt);
1391
1392 if (inet_csk_ack_scheduled(sk)) {
1393 const struct inet_connection_sock *icsk = inet_csk(sk);
1394 /* Delayed ACKs frequently hit locked sockets during bulk
1395 * receive. */
1396 if (icsk->icsk_ack.blocked ||
1397 /* Once-per-two-segments ACK was not sent by tcp_input.c */
1398 tp->rcv_nxt - tp->rcv_wup > icsk->icsk_ack.rcv_mss ||
		    /*
		     * If this read emptied the read buffer, we send an ACK when
		     * the connection is not bidirectional, the user drained the
		     * receive buffer, and there was a small segment in the queue.
		     */
1405 (copied > 0 &&
1406 ((icsk->icsk_ack.pending & ICSK_ACK_PUSHED2) ||
1407 ((icsk->icsk_ack.pending & ICSK_ACK_PUSHED) &&
1408 !icsk->icsk_ack.pingpong)) &&
1409 !atomic_read(&sk->sk_rmem_alloc)))
1410 time_to_ack = true;
1411 }
1412
1413 /* We send an ACK if we can now advertise a non-zero window
1414 * which has been raised "significantly".
1415 *
1416 * Even if window raised up to infinity, do not send window open ACK
1417 * in states, where we will not receive more. It is useless.
1418 */
1419 if (copied > 0 && !time_to_ack && !(sk->sk_shutdown & RCV_SHUTDOWN)) {
1420 __u32 rcv_window_now = tcp_receive_window(tp);
1421
1422 /* Optimize, __tcp_select_window() is not cheap. */
1423 if (2*rcv_window_now <= tp->window_clamp) {
1424 __u32 new_window = __tcp_select_window(sk);
1425
			/* Send an ACK now if this read freed lots of space
			 * in our buffer. We can advertise the new window now
			 * if it is not less than the current one.
			 * "Lots" means "at least twice" here.
			 */
1431 if (new_window && new_window >= 2 * rcv_window_now)
1432 time_to_ack = true;
1433 }
1434 }
1435 if (time_to_ack)
1436 tcp_send_ack(sk);
1437}
1438
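/* Feed the skbs parked on the prequeue to the regular receive path (with
 * BHs disabled) and reset the prequeue memory accounting.
 */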
1439static void tcp_prequeue_process(struct sock *sk)
1440{
1441 struct sk_buff *skb;
1442 struct tcp_sock *tp = tcp_sk(sk);
1443
1444 NET_INC_STATS_USER(sock_net(sk), LINUX_MIB_TCPPREQUEUED);
1445
1446 /* RX process wants to run with disabled BHs, though it is not
1447 * necessary */
1448 local_bh_disable();
1449 while ((skb = __skb_dequeue(&tp->ucopy.prequeue)) != NULL)
1450 sk_backlog_rcv(sk, skb);
1451 local_bh_enable();
1452
1453 /* Clear memory counter. */
1454 tp->ucopy.memory = 0;
1455}
1456
1457#ifdef CONFIG_NET_DMA
1458static void tcp_service_net_dma(struct sock *sk, bool wait)
1459{
1460 dma_cookie_t done, used;
1461 dma_cookie_t last_issued;
1462 struct tcp_sock *tp = tcp_sk(sk);
1463
1464 if (!tp->ucopy.dma_chan)
1465 return;
1466
1467 last_issued = tp->ucopy.dma_cookie;
1468 dma_async_issue_pending(tp->ucopy.dma_chan);
1469
1470 do {
1471 if (dma_async_is_tx_complete(tp->ucopy.dma_chan,
1472 last_issued, &done,
1473 &used) == DMA_COMPLETE) {
1474 /* Safe to free early-copied skbs now */
1475 __skb_queue_purge(&sk->sk_async_wait_queue);
1476 break;
1477 } else {
1478 struct sk_buff *skb;
1479 while ((skb = skb_peek(&sk->sk_async_wait_queue)) &&
1480 (dma_async_is_complete(skb->dma_cookie, done,
1481 used) == DMA_COMPLETE)) {
1482 __skb_dequeue(&sk->sk_async_wait_queue);
1483 kfree_skb(skb);
1484 }
1485 }
1486 } while (wait);
1487}
1488#endif
1489
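/* Find the queued skb containing sequence number @seq and return the
 * offset of @seq within it. Fully consumed skbs encountered on the way
 * (possible after the socket lock was dropped, e.g. during splice) are
 * eaten.
 */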
1490static struct sk_buff *tcp_recv_skb(struct sock *sk, u32 seq, u32 *off)
1491{
1492 struct sk_buff *skb;
1493 u32 offset;
1494
1495 while ((skb = skb_peek(&sk->sk_receive_queue)) != NULL) {
1496 offset = seq - TCP_SKB_CB(skb)->seq;
1497 if (tcp_hdr(skb)->syn)
1498 offset--;
1499 if (offset < skb->len || tcp_hdr(skb)->fin) {
1500 *off = offset;
1501 return skb;
1502 }
		/* This looks weird, but it can happen if TCP collapsing
		 * split a large GRO packet while we released the socket lock
		 * in skb_splice_bits().
		 */
1507 sk_eat_skb(sk, skb, false);
1508 }
1509 return NULL;
1510}
1511
1512/*
1513 * This routine provides an alternative to tcp_recvmsg() for routines
1514 * that would like to handle copying from skbuffs directly in 'sendfile'
1515 * fashion.
1516 * Note:
1517 * - It is assumed that the socket was locked by the caller.
1518 * - The routine does not block.
1519 * - At present, there is no support for reading OOB data
1520 * or for 'peeking' the socket using this routine
1521 * (although both would be easy to implement).
1522 */
1523int tcp_read_sock(struct sock *sk, read_descriptor_t *desc,
1524 sk_read_actor_t recv_actor)
1525{
1526 struct sk_buff *skb;
1527 struct tcp_sock *tp = tcp_sk(sk);
1528 u32 seq = tp->copied_seq;
1529 u32 offset;
1530 int copied = 0;
1531
1532 if (sk->sk_state == TCP_LISTEN)
1533 return -ENOTCONN;
1534 while ((skb = tcp_recv_skb(sk, seq, &offset)) != NULL) {
1535 if (offset < skb->len) {
1536 int used;
1537 size_t len;
1538
1539 len = skb->len - offset;
1540 /* Stop reading if we hit a patch of urgent data */
1541 if (tp->urg_data) {
1542 u32 urg_offset = tp->urg_seq - seq;
1543 if (urg_offset < len)
1544 len = urg_offset;
1545 if (!len)
1546 break;
1547 }
1548 used = recv_actor(desc, skb, offset, len);
1549 if (used <= 0) {
1550 if (!copied)
1551 copied = used;
1552 break;
1553 } else if (used <= len) {
1554 seq += used;
1555 copied += used;
1556 offset += used;
1557 }
1558 /* If recv_actor drops the lock (e.g. TCP splice
1559 * receive) the skb pointer might be invalid when
1560 * getting here: tcp_collapse might have deleted it
1561 * while aggregating skbs from the socket queue.
1562 */
1563 skb = tcp_recv_skb(sk, seq - 1, &offset);
1564 if (!skb)
1565 break;
1566 /* TCP coalescing might have appended data to the skb.
1567 * Try to splice more frags
1568 */
1569 if (offset + 1 != skb->len)
1570 continue;
1571 }
1572 if (tcp_hdr(skb)->fin) {
1573 sk_eat_skb(sk, skb, false);
1574 ++seq;
1575 break;
1576 }
1577 sk_eat_skb(sk, skb, false);
1578 if (!desc->count)
1579 break;
1580 tp->copied_seq = seq;
1581 }
1582 tp->copied_seq = seq;
1583
1584 tcp_rcv_space_adjust(sk);
1585
1586 /* Clean up data we have read: This will do ACK frames. */
1587 if (copied > 0) {
1588 tcp_recv_skb(sk, seq, &offset);
1589 tcp_cleanup_rbuf(sk, copied);
1590 }
1591 return copied;
1592}
1593EXPORT_SYMBOL(tcp_read_sock);
1594
1595/*
1596 * This routine copies from a sock struct into the user buffer.
1597 *
1598 * Technical note: in 2.3 we work on _locked_ socket, so that
1599 * tricks with *seq access order and skb->users are not required.
1600 * Probably, code can be easily improved even more.
1601 */
1602
1603int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
1604 size_t len, int nonblock, int flags, int *addr_len)
1605{
1606 struct tcp_sock *tp = tcp_sk(sk);
1607 int copied = 0;
1608 u32 peek_seq;
1609 u32 *seq;
1610 unsigned long used;
1611 int err;
1612 int target; /* Read at least this many bytes */
1613 long timeo;
1614 struct task_struct *user_recv = NULL;
1615 bool copied_early = false;
1616 struct sk_buff *skb;
1617 u32 urg_hole = 0;
1618
1619 if (sk_can_busy_loop(sk) && skb_queue_empty(&sk->sk_receive_queue) &&
1620 (sk->sk_state == TCP_ESTABLISHED))
1621 sk_busy_loop(sk, nonblock);
1622
1623 lock_sock(sk);
1624
1625 err = -ENOTCONN;
1626 if (sk->sk_state == TCP_LISTEN)
1627 goto out;
1628
1629 timeo = sock_rcvtimeo(sk, nonblock);
1630
1631 /* Urgent data needs to be handled specially. */
1632 if (flags & MSG_OOB)
1633 goto recv_urg;
1634
1635 if (unlikely(tp->repair)) {
1636 err = -EPERM;
1637 if (!(flags & MSG_PEEK))
1638 goto out;
1639
1640 if (tp->repair_queue == TCP_SEND_QUEUE)
1641 goto recv_sndq;
1642
1643 err = -EINVAL;
1644 if (tp->repair_queue == TCP_NO_QUEUE)
1645 goto out;
1646
1647 /* 'common' recv queue MSG_PEEK-ing */
1648 }
1649
1650 seq = &tp->copied_seq;
1651 if (flags & MSG_PEEK) {
1652 peek_seq = tp->copied_seq;
1653 seq = &peek_seq;
1654 }
1655
1656 target = sock_rcvlowat(sk, flags & MSG_WAITALL, len);
1657
1658#ifdef CONFIG_NET_DMA
1659 tp->ucopy.dma_chan = NULL;
1660 preempt_disable();
1661 skb = skb_peek_tail(&sk->sk_receive_queue);
1662 {
1663 int available = 0;
1664
1665 if (skb)
1666 available = TCP_SKB_CB(skb)->seq + skb->len - (*seq);
1667 if ((available < target) &&
1668 (len > sysctl_tcp_dma_copybreak) && !(flags & MSG_PEEK) &&
1669 !sysctl_tcp_low_latency &&
1670 net_dma_find_channel()) {
1671 preempt_enable();
1672 tp->ucopy.pinned_list =
1673 dma_pin_iovec_pages(msg->msg_iov, len);
1674 } else {
1675 preempt_enable();
1676 }
1677 }
1678#endif
1679
1680 do {
1681 u32 offset;
1682
1683 /* Are we at urgent data? Stop if we have read anything or have SIGURG pending. */
1684 if (tp->urg_data && tp->urg_seq == *seq) {
1685 if (copied)
1686 break;
1687 if (signal_pending(current)) {
1688 copied = timeo ? sock_intr_errno(timeo) : -EAGAIN;
1689 break;
1690 }
1691 }
1692
1693 /* Next get a buffer. */
1694
1695 skb_queue_walk(&sk->sk_receive_queue, skb) {
1696 /* Now that we have two receive queues this
1697 * shouldn't happen.
1698 */
1699 if (WARN(before(*seq, TCP_SKB_CB(skb)->seq),
1700 "recvmsg bug: copied %X seq %X rcvnxt %X fl %X\n",
1701 *seq, TCP_SKB_CB(skb)->seq, tp->rcv_nxt,
1702 flags))
1703 break;
1704
1705 offset = *seq - TCP_SKB_CB(skb)->seq;
1706 if (tcp_hdr(skb)->syn)
1707 offset--;
1708 if (offset < skb->len)
1709 goto found_ok_skb;
1710 if (tcp_hdr(skb)->fin)
1711 goto found_fin_ok;
1712 WARN(!(flags & MSG_PEEK),
1713 "recvmsg bug 2: copied %X seq %X rcvnxt %X fl %X\n",
1714 *seq, TCP_SKB_CB(skb)->seq, tp->rcv_nxt, flags);
1715 }
1716
		/* Well, if we have backlog, try to process it now. */
1718
1719 if (copied >= target && !sk->sk_backlog.tail)
1720 break;
1721
1722 if (copied) {
1723 if (sk->sk_err ||
1724 sk->sk_state == TCP_CLOSE ||
1725 (sk->sk_shutdown & RCV_SHUTDOWN) ||
1726 !timeo ||
1727 signal_pending(current))
1728 break;
1729 } else {
1730 if (sock_flag(sk, SOCK_DONE))
1731 break;
1732
1733 if (sk->sk_err) {
1734 copied = sock_error(sk);
1735 break;
1736 }
1737
1738 if (sk->sk_shutdown & RCV_SHUTDOWN)
1739 break;
1740
1741 if (sk->sk_state == TCP_CLOSE) {
1742 if (!sock_flag(sk, SOCK_DONE)) {
					/* This occurs when the user tries to read
					 * from a never-connected socket.
					 */
1746 copied = -ENOTCONN;
1747 break;
1748 }
1749 break;
1750 }
1751
1752 if (!timeo) {
1753 copied = -EAGAIN;
1754 break;
1755 }
1756
1757 if (signal_pending(current)) {
1758 copied = sock_intr_errno(timeo);
1759 break;
1760 }
1761 }
1762
1763 tcp_cleanup_rbuf(sk, copied);
1764
1765 if (!sysctl_tcp_low_latency && tp->ucopy.task == user_recv) {
1766 /* Install new reader */
1767 if (!user_recv && !(flags & (MSG_TRUNC | MSG_PEEK))) {
1768 user_recv = current;
1769 tp->ucopy.task = user_recv;
1770 tp->ucopy.iov = msg->msg_iov;
1771 }
1772
1773 tp->ucopy.len = len;
1774
1775 WARN_ON(tp->copied_seq != tp->rcv_nxt &&
1776 !(flags & (MSG_PEEK | MSG_TRUNC)));
1777
1778 /* Ugly... If prequeue is not empty, we have to
1779 * process it before releasing socket, otherwise
1780 * order will be broken at second iteration.
1781 * More elegant solution is required!!!
1782 *
1783 * Look: we have the following (pseudo)queues:
1784 *
1785 * 1. packets in flight
1786 * 2. backlog
1787 * 3. prequeue
1788 * 4. receive_queue
1789 *
1790 * Each queue can be processed only if the next ones
1791 * are empty. At this point we have empty receive_queue.
1792 * But prequeue _can_ be not empty after 2nd iteration,
1793 * when we jumped to start of loop because backlog
1794 * processing added something to receive_queue.
1795 * We cannot release_sock(), because backlog contains
1796 * packets arrived _after_ prequeued ones.
1797 *
1798 * Shortly, algorithm is clear --- to process all
1799 * the queues in order. We could make it more directly,
1800 * requeueing packets from backlog to prequeue, if
1801 * is not empty. It is more elegant, but eats cycles,
1802 * unfortunately.
1803 */
1804 if (!skb_queue_empty(&tp->ucopy.prequeue))
1805 goto do_prequeue;
1806
1807 /* __ Set realtime policy in scheduler __ */
1808 }
1809
1810#ifdef CONFIG_NET_DMA
1811 if (tp->ucopy.dma_chan) {
1812 if (tp->rcv_wnd == 0 &&
1813 !skb_queue_empty(&sk->sk_async_wait_queue)) {
1814 tcp_service_net_dma(sk, true);
1815 tcp_cleanup_rbuf(sk, copied);
1816 } else
1817 dma_async_issue_pending(tp->ucopy.dma_chan);
1818 }
1819#endif
1820 if (copied >= target) {
1821 /* Do not sleep, just process backlog. */
1822 release_sock(sk);
1823 lock_sock(sk);
1824 } else
1825 sk_wait_data(sk, &timeo);
1826
1827#ifdef CONFIG_NET_DMA
1828 tcp_service_net_dma(sk, false); /* Don't block */
1829 tp->ucopy.wakeup = 0;
1830#endif
1831
1832 if (user_recv) {
1833 int chunk;
1834
1835 /* __ Restore normal policy in scheduler __ */
1836
1837 if ((chunk = len - tp->ucopy.len) != 0) {
1838 NET_ADD_STATS_USER(sock_net(sk), LINUX_MIB_TCPDIRECTCOPYFROMBACKLOG, chunk);
1839 len -= chunk;
1840 copied += chunk;
1841 }
1842
1843 if (tp->rcv_nxt == tp->copied_seq &&
1844 !skb_queue_empty(&tp->ucopy.prequeue)) {
1845do_prequeue:
1846 tcp_prequeue_process(sk);
1847
1848 if ((chunk = len - tp->ucopy.len) != 0) {
1849 NET_ADD_STATS_USER(sock_net(sk), LINUX_MIB_TCPDIRECTCOPYFROMPREQUEUE, chunk);
1850 len -= chunk;
1851 copied += chunk;
1852 }
1853 }
1854 }
1855 if ((flags & MSG_PEEK) &&
1856 (peek_seq - copied - urg_hole != tp->copied_seq)) {
1857 net_dbg_ratelimited("TCP(%s:%d): Application bug, race in MSG_PEEK\n",
1858 current->comm,
1859 task_pid_nr(current));
1860 peek_seq = tp->copied_seq;
1861 }
1862 continue;
1863
1864 found_ok_skb:
1865 /* Ok so how much can we use? */
1866 used = skb->len - offset;
1867 if (len < used)
1868 used = len;
1869
1870 /* Do we have urgent data here? */
1871 if (tp->urg_data) {
1872 u32 urg_offset = tp->urg_seq - *seq;
1873 if (urg_offset < used) {
1874 if (!urg_offset) {
1875 if (!sock_flag(sk, SOCK_URGINLINE)) {
1876 ++*seq;
1877 urg_hole++;
1878 offset++;
1879 used--;
1880 if (!used)
1881 goto skip_copy;
1882 }
1883 } else
1884 used = urg_offset;
1885 }
1886 }
1887
1888 if (!(flags & MSG_TRUNC)) {
1889#ifdef CONFIG_NET_DMA
1890 if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
1891 tp->ucopy.dma_chan = net_dma_find_channel();
1892
1893 if (tp->ucopy.dma_chan) {
1894 tp->ucopy.dma_cookie = dma_skb_copy_datagram_iovec(
1895 tp->ucopy.dma_chan, skb, offset,
1896 msg->msg_iov, used,
1897 tp->ucopy.pinned_list);
1898
1899 if (tp->ucopy.dma_cookie < 0) {
1900
1901 pr_alert("%s: dma_cookie < 0\n",
1902 __func__);
1903
1904 /* Exception. Bailout! */
1905 if (!copied)
1906 copied = -EFAULT;
1907 break;
1908 }
1909
1910 dma_async_issue_pending(tp->ucopy.dma_chan);
1911
1912 if ((offset + used) == skb->len)
1913 copied_early = true;
1914
1915 } else
1916#endif
1917 {
1918 err = skb_copy_datagram_iovec(skb, offset,
1919 msg->msg_iov, used);
1920 if (err) {
1921 /* Exception. Bailout! */
1922 if (!copied)
1923 copied = -EFAULT;
1924 break;
1925 }
1926 }
1927 }
1928
1929 *seq += used;
1930 copied += used;
1931 len -= used;
1932
1933 tcp_rcv_space_adjust(sk);
1934
1935skip_copy:
1936 if (tp->urg_data && after(tp->copied_seq, tp->urg_seq)) {
1937 tp->urg_data = 0;
1938 tcp_fast_path_check(sk);
1939 }
1940 if (used + offset < skb->len)
1941 continue;
1942
1943 if (tcp_hdr(skb)->fin)
1944 goto found_fin_ok;
1945 if (!(flags & MSG_PEEK)) {
1946 sk_eat_skb(sk, skb, copied_early);
1947 copied_early = false;
1948 }
1949 continue;
1950
1951 found_fin_ok:
1952 /* Process the FIN. */
1953 ++*seq;
1954 if (!(flags & MSG_PEEK)) {
1955 sk_eat_skb(sk, skb, copied_early);
1956 copied_early = false;
1957 }
1958 break;
1959 } while (len > 0);
1960
1961 if (user_recv) {
1962 if (!skb_queue_empty(&tp->ucopy.prequeue)) {
1963 int chunk;
1964
1965 tp->ucopy.len = copied > 0 ? len : 0;
1966
1967 tcp_prequeue_process(sk);
1968
1969 if (copied > 0 && (chunk = len - tp->ucopy.len) != 0) {
1970 NET_ADD_STATS_USER(sock_net(sk), LINUX_MIB_TCPDIRECTCOPYFROMPREQUEUE, chunk);
1971 len -= chunk;
1972 copied += chunk;
1973 }
1974 }
1975
1976 tp->ucopy.task = NULL;
1977 tp->ucopy.len = 0;
1978 }
1979
1980#ifdef CONFIG_NET_DMA
1981 tcp_service_net_dma(sk, true); /* Wait for queue to drain */
1982 tp->ucopy.dma_chan = NULL;
1983
1984 if (tp->ucopy.pinned_list) {
1985 dma_unpin_iovec_pages(tp->ucopy.pinned_list);
1986 tp->ucopy.pinned_list = NULL;
1987 }
1988#endif
1989
1990 /* According to UNIX98, msg_name/msg_namelen are ignored
1991	 * on a connected socket. I was just happy when I found this 8) --ANK
1992 */
1993
1994 /* Clean up data we have read: This will do ACK frames. */
1995 tcp_cleanup_rbuf(sk, copied);
1996
1997 release_sock(sk);
1998 return copied;
1999
2000out:
2001 release_sock(sk);
2002 return err;
2003
2004recv_urg:
2005 err = tcp_recv_urg(sk, msg, len, flags);
2006 goto out;
2007
2008recv_sndq:
2009 err = tcp_peek_sndq(sk, msg, len);
2010 goto out;
2011}
2012EXPORT_SYMBOL(tcp_recvmsg);
2013
2014void tcp_set_state(struct sock *sk, int state)
2015{
2016 int oldstate = sk->sk_state;
2017
2018 switch (state) {
2019 case TCP_ESTABLISHED:
2020 if (oldstate != TCP_ESTABLISHED)
2021 TCP_INC_STATS(sock_net(sk), TCP_MIB_CURRESTAB);
2022 break;
2023
2024 case TCP_CLOSE:
2025 if (oldstate == TCP_CLOSE_WAIT || oldstate == TCP_ESTABLISHED)
2026 TCP_INC_STATS(sock_net(sk), TCP_MIB_ESTABRESETS);
2027
2028 sk->sk_prot->unhash(sk);
2029 if (inet_csk(sk)->icsk_bind_hash &&
2030 !(sk->sk_userlocks & SOCK_BINDPORT_LOCK))
2031 inet_put_port(sk);
2032 /* fall through */
2033 default:
2034 if (oldstate == TCP_ESTABLISHED)
2035 TCP_DEC_STATS(sock_net(sk), TCP_MIB_CURRESTAB);
2036 }
2037
2038 /* Change state AFTER socket is unhashed to avoid closed
2039 * socket sitting in hash tables.
2040 */
2041 sk->sk_state = state;
2042
2043#ifdef STATE_TRACE
2044 SOCK_DEBUG(sk, "TCP sk=%p, State %s -> %s\n", sk, statename[oldstate], statename[state]);
2045#endif
2046}
2047EXPORT_SYMBOL_GPL(tcp_set_state);
2048
2049/*
2050 * State processing on a close. This implements the state shift for
2051 * sending our FIN frame. Note that we only send a FIN for some
2052 * states. A shutdown() may have already sent the FIN, or we may be
2053 * closed.
2054 */
2055
2056static const unsigned char new_state[16] = {
2057 /* current state: new state: action: */
2058 /* (Invalid) */ TCP_CLOSE,
2059 /* TCP_ESTABLISHED */ TCP_FIN_WAIT1 | TCP_ACTION_FIN,
2060 /* TCP_SYN_SENT */ TCP_CLOSE,
2061 /* TCP_SYN_RECV */ TCP_FIN_WAIT1 | TCP_ACTION_FIN,
2062 /* TCP_FIN_WAIT1 */ TCP_FIN_WAIT1,
2063 /* TCP_FIN_WAIT2 */ TCP_FIN_WAIT2,
2064 /* TCP_TIME_WAIT */ TCP_CLOSE,
2065 /* TCP_CLOSE */ TCP_CLOSE,
2066 /* TCP_CLOSE_WAIT */ TCP_LAST_ACK | TCP_ACTION_FIN,
2067 /* TCP_LAST_ACK */ TCP_LAST_ACK,
2068 /* TCP_LISTEN */ TCP_CLOSE,
2069 /* TCP_CLOSING */ TCP_CLOSING,
2070};
2071
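/* Perform the on-close state transition for this socket: the low bits of
 * new_state[] (TCP_STATE_MASK) give the successor state, and the returned
 * TCP_ACTION_FIN bit tells the caller whether a FIN still has to be sent.
 */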
2072static int tcp_close_state(struct sock *sk)
2073{
2074 int next = (int)new_state[sk->sk_state];
2075 int ns = next & TCP_STATE_MASK;
2076
2077 tcp_set_state(sk, ns);
2078
2079 return next & TCP_ACTION_FIN;
2080}
2081
2082/*
2083 * Shutdown the sending side of a connection. Much like close except
2084 * that we don't shut down the receive side or mark the socket SOCK_DEAD.
2085 */
2086
2087void tcp_shutdown(struct sock *sk, int how)
2088{
2089 /* We need to grab some memory, and put together a FIN,
2090 * and then put it into the queue to be sent.
2091 * Tim MacKenzie(tym@dibbler.cs.monash.edu.au) 4 Dec '92.
2092 */
2093 if (!(how & SEND_SHUTDOWN))
2094 return;
2095
2096 /* If we've already sent a FIN, or it's a closed state, skip this. */
2097 if ((1 << sk->sk_state) &
2098 (TCPF_ESTABLISHED | TCPF_SYN_SENT |
2099 TCPF_SYN_RECV | TCPF_CLOSE_WAIT)) {
2100 /* Clear out any half completed packets. FIN if needed. */
2101 if (tcp_close_state(sk))
2102 tcp_send_fin(sk);
2103 }
2104}
2105EXPORT_SYMBOL(tcp_shutdown);
2106
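/* Decide whether a dying socket must be aborted: either there are too many
 * orphaned sockets (the limit scaled down by 'shift'), or TCP as a whole has
 * run out of socket memory.
 */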
2107bool tcp_check_oom(struct sock *sk, int shift)
2108{
2109 bool too_many_orphans, out_of_socket_memory;
2110
2111 too_many_orphans = tcp_too_many_orphans(sk, shift);
2112 out_of_socket_memory = tcp_out_of_memory(sk);
2113
2114 if (too_many_orphans)
2115 net_info_ratelimited("too many orphaned sockets\n");
2116 if (out_of_socket_memory)
2117 net_info_ratelimited("out of memory -- consider tuning tcp_mem\n");
2118 return too_many_orphans || out_of_socket_memory;
2119}
2120
2121void tcp_close(struct sock *sk, long timeout)
2122{
2123 struct sk_buff *skb;
2124 int data_was_unread = 0;
2125 int state;
2126
2127 lock_sock(sk);
2128 sk->sk_shutdown = SHUTDOWN_MASK;
2129
2130 if (sk->sk_state == TCP_LISTEN) {
2131 tcp_set_state(sk, TCP_CLOSE);
2132
2133 /* Special case. */
2134 inet_csk_listen_stop(sk);
2135
2136 goto adjudge_to_death;
2137 }
2138
2139 /* We need to flush the recv. buffs. We do this only on the
2140 * descriptor close, not protocol-sourced closes, because the
2141 * reader process may not have drained the data yet!
2142 */
2143 while ((skb = __skb_dequeue(&sk->sk_receive_queue)) != NULL) {
2144 u32 len = TCP_SKB_CB(skb)->end_seq - TCP_SKB_CB(skb)->seq -
2145 tcp_hdr(skb)->fin;
2146 data_was_unread += len;
2147 __kfree_skb(skb);
2148 }
2149
2150 sk_mem_reclaim(sk);
2151
2152 /* If socket has been already reset (e.g. in tcp_reset()) - kill it. */
2153 if (sk->sk_state == TCP_CLOSE)
2154 goto adjudge_to_death;
2155
2156 /* As outlined in RFC 2525, section 2.17, we send a RST here because
2157 * data was lost. To witness the awful effects of the old behavior of
2158 * always doing a FIN, run an older 2.1.x kernel or 2.0.x, start a bulk
2159 * GET in an FTP client, suspend the process, wait for the client to
2160 * advertise a zero window, then kill -9 the FTP client, wheee...
2161 * Note: timeout is always zero in such a case.
2162 */
2163 if (unlikely(tcp_sk(sk)->repair)) {
2164 sk->sk_prot->disconnect(sk, 0);
2165 } else if (data_was_unread) {
2166 /* Unread data was tossed, zap the connection. */
2167 NET_INC_STATS_USER(sock_net(sk), LINUX_MIB_TCPABORTONCLOSE);
2168 tcp_set_state(sk, TCP_CLOSE);
2169 tcp_send_active_reset(sk, sk->sk_allocation);
2170 } else if (sock_flag(sk, SOCK_LINGER) && !sk->sk_lingertime) {
2171 /* Check zero linger _after_ checking for unread data. */
2172 sk->sk_prot->disconnect(sk, 0);
2173 NET_INC_STATS_USER(sock_net(sk), LINUX_MIB_TCPABORTONDATA);
2174 } else if (tcp_close_state(sk)) {
2175 /* We FIN if the application ate all the data before
2176 * zapping the connection.
2177 */
2178
2179 /* RED-PEN. Formally speaking, we have broken TCP state
2180 * machine. State transitions:
2181 *
2182 * TCP_ESTABLISHED -> TCP_FIN_WAIT1
2183 * TCP_SYN_RECV -> TCP_FIN_WAIT1 (forget it, it's impossible)
2184 * TCP_CLOSE_WAIT -> TCP_LAST_ACK
2185 *
2186 * are legal only when FIN has been sent (i.e. in window),
2187	 * rather than queued out of window. Purists would object.
2188	 *
2189	 * E.g. the "RFC state" is ESTABLISHED
2190	 * if the Linux state is FIN-WAIT-1 but the FIN has not been sent yet.
2191	 *
2192	 * The visible deviations are that we sometimes
2193	 * enter TIME-WAIT when it is not really required
2194	 * (harmless), and do not send active resets when the specs
2195	 * require them (TCP_ESTABLISHED and TCP_CLOSE_WAIT, when
2196	 * they look like CLOSING or LAST_ACK to Linux).
2197	 * Probably I have missed some more corner cases.
2198 * --ANK
2199 * XXX (TFO) - To start off we don't support SYN+ACK+FIN
2200 * in a single packet! (May consider it later but will
2201 * probably need API support or TCP_CORK SYN-ACK until
2202 * data is written and socket is closed.)
2203 */
2204 tcp_send_fin(sk);
2205 }
2206
2207 sk_stream_wait_close(sk, timeout);
2208
2209adjudge_to_death:
2210 state = sk->sk_state;
2211 sock_hold(sk);
2212 sock_orphan(sk);
2213
2214 /* It is the last release_sock in its life. It will remove backlog. */
2215 release_sock(sk);
2216
2217
2218	/* Now the socket is owned by the kernel and we acquire the BH lock
2219	 * to finish the close. No need to check for user refs.
2220	 */
2221 local_bh_disable();
2222 bh_lock_sock(sk);
2223 WARN_ON(sock_owned_by_user(sk));
2224
2225 percpu_counter_inc(sk->sk_prot->orphan_count);
2226
2227 /* Have we already been destroyed by a softirq or backlog? */
2228 if (state != TCP_CLOSE && sk->sk_state == TCP_CLOSE)
2229 goto out;
2230
2231	/* This is a (useful) BSD violation of the RFC. There is a
2232	 * problem with TCP as specified in that the other end could
2233	 * keep a socket open forever with no application left at this end.
2234	 * We use a 1 minute timeout (about the same as BSD) then kill
2235	 * our end. If they send after that then tough - BUT: long enough
2236	 * that we won't repeat the old 4*rto = almost-no-time "whoops,
2237	 * reset" mistake.
2238	 *
2239	 * Nope, it was not a mistake. It is really the desired behaviour,
2240	 * e.g. on HTTP servers, where such sockets are useless but
2241	 * consume significant resources. Let's do it with the special
2242	 * linger2 option. --ANK
2243 */
2244
2245 if (sk->sk_state == TCP_FIN_WAIT2) {
2246 struct tcp_sock *tp = tcp_sk(sk);
2247 if (tp->linger2 < 0) {
2248 tcp_set_state(sk, TCP_CLOSE);
2249 tcp_send_active_reset(sk, GFP_ATOMIC);
2250 NET_INC_STATS_BH(sock_net(sk),
2251 LINUX_MIB_TCPABORTONLINGER);
2252 } else {
2253 const int tmo = tcp_fin_time(sk);
2254
2255 if (tmo > TCP_TIMEWAIT_LEN) {
2256 inet_csk_reset_keepalive_timer(sk,
2257 tmo - TCP_TIMEWAIT_LEN);
2258 } else {
2259 tcp_time_wait(sk, TCP_FIN_WAIT2, tmo);
2260 goto out;
2261 }
2262 }
2263 }
2264 if (sk->sk_state != TCP_CLOSE) {
2265 sk_mem_reclaim(sk);
2266 if (tcp_check_oom(sk, 0)) {
2267 tcp_set_state(sk, TCP_CLOSE);
2268 tcp_send_active_reset(sk, GFP_ATOMIC);
2269 NET_INC_STATS_BH(sock_net(sk),
2270 LINUX_MIB_TCPABORTONMEMORY);
2271 }
2272 }
2273
2274 if (sk->sk_state == TCP_CLOSE) {
2275 struct request_sock *req = tcp_sk(sk)->fastopen_rsk;
2276 /* We could get here with a non-NULL req if the socket is
2277 * aborted (e.g., closed with unread data) before 3WHS
2278 * finishes.
2279 */
2280 if (req != NULL)
2281 reqsk_fastopen_remove(sk, req, false);
2282 inet_csk_destroy_sock(sk);
2283 }
2284 /* Otherwise, socket is reprieved until protocol close. */
2285
2286out:
2287 bh_unlock_sock(sk);
2288 local_bh_enable();
2289 sock_put(sk);
2290}
2291EXPORT_SYMBOL(tcp_close);
2292
2293/* These states need RST on ABORT according to RFC793 */
2294
2295static inline bool tcp_need_reset(int state)
2296{
2297 return (1 << state) &
2298 (TCPF_ESTABLISHED | TCPF_CLOSE_WAIT | TCPF_FIN_WAIT1 |
2299 TCPF_FIN_WAIT2 | TCPF_SYN_RECV);
2300}
2301
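/* Abort the connection (the RFC 793 ABORT call) and return the socket to a
 * clean TCP_CLOSE state so it can be reused, sending a RST first when the
 * old state requires one.
 */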
2302int tcp_disconnect(struct sock *sk, int flags)
2303{
2304 struct inet_sock *inet = inet_sk(sk);
2305 struct inet_connection_sock *icsk = inet_csk(sk);
2306 struct tcp_sock *tp = tcp_sk(sk);
2307 int err = 0;
2308 int old_state = sk->sk_state;
2309
2310 if (old_state != TCP_CLOSE)
2311 tcp_set_state(sk, TCP_CLOSE);
2312
2313 /* ABORT function of RFC793 */
2314 if (old_state == TCP_LISTEN) {
2315 inet_csk_listen_stop(sk);
2316 } else if (unlikely(tp->repair)) {
2317 sk->sk_err = ECONNABORTED;
2318 } else if (tcp_need_reset(old_state) ||
2319 (tp->snd_nxt != tp->write_seq &&
2320 (1 << old_state) & (TCPF_CLOSING | TCPF_LAST_ACK))) {
2321		/* The last check adjusts for the discrepancy between the Linux
2322		 * and RFC state machines.
2323		 */
2324 tcp_send_active_reset(sk, gfp_any());
2325 sk->sk_err = ECONNRESET;
2326 } else if (old_state == TCP_SYN_SENT)
2327 sk->sk_err = ECONNRESET;
2328
2329 tcp_clear_xmit_timers(sk);
2330 __skb_queue_purge(&sk->sk_receive_queue);
2331 tcp_write_queue_purge(sk);
2332 __skb_queue_purge(&tp->out_of_order_queue);
2333#ifdef CONFIG_NET_DMA
2334 __skb_queue_purge(&sk->sk_async_wait_queue);
2335#endif
2336
2337 inet->inet_dport = 0;
2338
2339 if (!(sk->sk_userlocks & SOCK_BINDADDR_LOCK))
2340 inet_reset_saddr(sk);
2341
2342 sk->sk_shutdown = 0;
2343 sock_reset_flag(sk, SOCK_DONE);
2344 tp->srtt_us = 0;
2345 if ((tp->write_seq += tp->max_window + 2) == 0)
2346 tp->write_seq = 1;
2347 icsk->icsk_backoff = 0;
2348 tp->snd_cwnd = 2;
2349 icsk->icsk_probes_out = 0;
2350 tp->packets_out = 0;
2351 tp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
2352 tp->snd_cwnd_cnt = 0;
2353 tp->window_clamp = 0;
2354 tcp_set_ca_state(sk, TCP_CA_Open);
2355 tcp_clear_retrans(tp);
2356 inet_csk_delack_init(sk);
2357 tcp_init_send_head(sk);
2358 memset(&tp->rx_opt, 0, sizeof(tp->rx_opt));
2359 __sk_dst_reset(sk);
2360
2361 WARN_ON(inet->inet_num && !icsk->icsk_bind_hash);
2362
2363 sk->sk_error_report(sk);
2364 return err;
2365}
2366EXPORT_SYMBOL(tcp_disconnect);
2367
2368void tcp_sock_destruct(struct sock *sk)
2369{
2370 inet_sock_destruct(sk);
2371
2372 kfree(inet_csk(sk)->icsk_accept_queue.fastopenq);
2373}
2374
2375static inline bool tcp_can_repair_sock(const struct sock *sk)
2376{
2377 return ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN) &&
2378 ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_ESTABLISHED));
2379}
2380
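/* Replay previously recorded TCP options onto a socket under repair
 * (TCP_REPAIR_OPTIONS). Each tcp_repair_opt carries an option code and
 * value; for TCPOPT_WINDOW the send window scale sits in the low 16 bits
 * of opt_val and the receive window scale in the high 16 bits.
 */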
2381static int tcp_repair_options_est(struct tcp_sock *tp,
2382 struct tcp_repair_opt __user *optbuf, unsigned int len)
2383{
2384 struct tcp_repair_opt opt;
2385
2386 while (len >= sizeof(opt)) {
2387 if (copy_from_user(&opt, optbuf, sizeof(opt)))
2388 return -EFAULT;
2389
2390 optbuf++;
2391 len -= sizeof(opt);
2392
2393 switch (opt.opt_code) {
2394 case TCPOPT_MSS:
2395 tp->rx_opt.mss_clamp = opt.opt_val;
2396 break;
2397 case TCPOPT_WINDOW:
2398 {
2399 u16 snd_wscale = opt.opt_val & 0xFFFF;
2400 u16 rcv_wscale = opt.opt_val >> 16;
2401
2402 if (snd_wscale > 14 || rcv_wscale > 14)
2403 return -EFBIG;
2404
2405 tp->rx_opt.snd_wscale = snd_wscale;
2406 tp->rx_opt.rcv_wscale = rcv_wscale;
2407 tp->rx_opt.wscale_ok = 1;
2408 }
2409 break;
2410 case TCPOPT_SACK_PERM:
2411 if (opt.opt_val != 0)
2412 return -EINVAL;
2413
2414 tp->rx_opt.sack_ok |= TCP_SACK_SEEN;
2415 if (sysctl_tcp_fack)
2416 tcp_enable_fack(tp);
2417 break;
2418 case TCPOPT_TIMESTAMP:
2419 if (opt.opt_val != 0)
2420 return -EINVAL;
2421
2422 tp->rx_opt.tstamp_ok = 1;
2423 break;
2424 }
2425 }
2426
2427 return 0;
2428}
2429
2430/*
2431 * Socket option code for TCP.
2432 */
2433static int do_tcp_setsockopt(struct sock *sk, int level,
2434 int optname, char __user *optval, unsigned int optlen)
2435{
2436 struct tcp_sock *tp = tcp_sk(sk);
2437 struct inet_connection_sock *icsk = inet_csk(sk);
2438 int val;
2439 int err = 0;
2440
2441 /* These are data/string values, all the others are ints */
2442 switch (optname) {
2443 case TCP_CONGESTION: {
2444 char name[TCP_CA_NAME_MAX];
2445
2446 if (optlen < 1)
2447 return -EINVAL;
2448
2449 val = strncpy_from_user(name, optval,
2450 min_t(long, TCP_CA_NAME_MAX-1, optlen));
2451 if (val < 0)
2452 return -EFAULT;
2453 name[val] = 0;
2454
2455 lock_sock(sk);
2456 err = tcp_set_congestion_control(sk, name);
2457 release_sock(sk);
2458 return err;
2459 }
2460 default:
2461 /* fallthru */
2462 break;
2463 }
2464
2465 if (optlen < sizeof(int))
2466 return -EINVAL;
2467
2468 if (get_user(val, (int __user *)optval))
2469 return -EFAULT;
2470
2471 lock_sock(sk);
2472
2473 switch (optname) {
2474 case TCP_MAXSEG:
2475 /* Values greater than interface MTU won't take effect. However
2476 * at the point when this call is done we typically don't yet
2477		 * know which interface is going to be used. */
2478 if (val < TCP_MIN_MSS || val > MAX_TCP_WINDOW) {
2479 err = -EINVAL;
2480 break;
2481 }
2482 tp->rx_opt.user_mss = val;
2483 break;
2484
2485 case TCP_NODELAY:
2486 if (val) {
2487 /* TCP_NODELAY is weaker than TCP_CORK, so that
2488 * this option on corked socket is remembered, but
2489 * it is not activated until cork is cleared.
2490 *
2491 * However, when TCP_NODELAY is set we make
2492 * an explicit push, which overrides even TCP_CORK
2493 * for currently queued segments.
2494 */
2495 tp->nonagle |= TCP_NAGLE_OFF|TCP_NAGLE_PUSH;
2496 tcp_push_pending_frames(sk);
2497 } else {
2498 tp->nonagle &= ~TCP_NAGLE_OFF;
2499 }
2500 break;
2501
2502 case TCP_THIN_LINEAR_TIMEOUTS:
2503 if (val < 0 || val > 1)
2504 err = -EINVAL;
2505 else
2506 tp->thin_lto = val;
2507 break;
2508
2509 case TCP_THIN_DUPACK:
2510 if (val < 0 || val > 1)
2511 err = -EINVAL;
2512 else {
2513 tp->thin_dupack = val;
2514 if (tp->thin_dupack)
2515 tcp_disable_early_retrans(tp);
2516 }
2517 break;
2518
2519 case TCP_REPAIR:
2520 if (!tcp_can_repair_sock(sk))
2521 err = -EPERM;
2522 else if (val == 1) {
2523 tp->repair = 1;
2524 sk->sk_reuse = SK_FORCE_REUSE;
2525 tp->repair_queue = TCP_NO_QUEUE;
2526 } else if (val == 0) {
2527 tp->repair = 0;
2528 sk->sk_reuse = SK_NO_REUSE;
2529 tcp_send_window_probe(sk);
2530 } else
2531 err = -EINVAL;
2532
2533 break;
2534
2535 case TCP_REPAIR_QUEUE:
2536 if (!tp->repair)
2537 err = -EPERM;
2538 else if (val < TCP_QUEUES_NR)
2539 tp->repair_queue = val;
2540 else
2541 err = -EINVAL;
2542 break;
2543
2544 case TCP_QUEUE_SEQ:
2545 if (sk->sk_state != TCP_CLOSE)
2546 err = -EPERM;
2547 else if (tp->repair_queue == TCP_SEND_QUEUE)
2548 tp->write_seq = val;
2549 else if (tp->repair_queue == TCP_RECV_QUEUE)
2550 tp->rcv_nxt = val;
2551 else
2552 err = -EINVAL;
2553 break;
2554
2555 case TCP_REPAIR_OPTIONS:
2556 if (!tp->repair)
2557 err = -EINVAL;
2558 else if (sk->sk_state == TCP_ESTABLISHED)
2559 err = tcp_repair_options_est(tp,
2560 (struct tcp_repair_opt __user *)optval,
2561 optlen);
2562 else
2563 err = -EPERM;
2564 break;
2565
2566 case TCP_CORK:
2567		/* When set, this tells TCP to always queue non-full frames.
2568 * Later the user clears this option and we transmit
2569 * any pending partial frames in the queue. This is
2570 * meant to be used alongside sendfile() to get properly
2571 * filled frames when the user (for example) must write
2572 * out headers with a write() call first and then use
2573 * sendfile to send out the data parts.
2574 *
2575 * TCP_CORK can be set together with TCP_NODELAY and it is
2576 * stronger than TCP_NODELAY.
2577 */
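		/* Illustrative userspace usage (not part of this file; fd, hdr,
		 * hdr_len, file_fd and body_len are placeholders): corking around
		 * a header write() plus sendfile() produces properly filled frames:
		 *
		 *	int on = 1, off = 0;
		 *
		 *	setsockopt(fd, IPPROTO_TCP, TCP_CORK, &on, sizeof(on));
		 *	write(fd, hdr, hdr_len);
		 *	sendfile(fd, file_fd, NULL, body_len);
		 *	setsockopt(fd, IPPROTO_TCP, TCP_CORK, &off, sizeof(off));
		 *
		 * Clearing TCP_CORK at the end flushes the final partial frame.
		 */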
2578 if (val) {
2579 tp->nonagle |= TCP_NAGLE_CORK;
2580 } else {
2581 tp->nonagle &= ~TCP_NAGLE_CORK;
2582 if (tp->nonagle&TCP_NAGLE_OFF)
2583 tp->nonagle |= TCP_NAGLE_PUSH;
2584 tcp_push_pending_frames(sk);
2585 }
2586 break;
2587
2588 case TCP_KEEPIDLE:
2589 if (val < 1 || val > MAX_TCP_KEEPIDLE)
2590 err = -EINVAL;
2591 else {
2592 tp->keepalive_time = val * HZ;
2593 if (sock_flag(sk, SOCK_KEEPOPEN) &&
2594 !((1 << sk->sk_state) &
2595 (TCPF_CLOSE | TCPF_LISTEN))) {
2596 u32 elapsed = keepalive_time_elapsed(tp);
2597 if (tp->keepalive_time > elapsed)
2598 elapsed = tp->keepalive_time - elapsed;
2599 else
2600 elapsed = 0;
2601 inet_csk_reset_keepalive_timer(sk, elapsed);
2602 }
2603 }
2604 break;
2605 case TCP_KEEPINTVL:
2606 if (val < 1 || val > MAX_TCP_KEEPINTVL)
2607 err = -EINVAL;
2608 else
2609 tp->keepalive_intvl = val * HZ;
2610 break;
2611 case TCP_KEEPCNT:
2612 if (val < 1 || val > MAX_TCP_KEEPCNT)
2613 err = -EINVAL;
2614 else
2615 tp->keepalive_probes = val;
2616 break;
2617 case TCP_SYNCNT:
2618 if (val < 1 || val > MAX_TCP_SYNCNT)
2619 err = -EINVAL;
2620 else
2621 icsk->icsk_syn_retries = val;
2622 break;
2623
2624 case TCP_LINGER2:
2625 if (val < 0)
2626 tp->linger2 = -1;
2627 else if (val > sysctl_tcp_fin_timeout / HZ)
2628 tp->linger2 = 0;
2629 else
2630 tp->linger2 = val * HZ;
2631 break;
2632
2633 case TCP_DEFER_ACCEPT:
2634 /* Translate value in seconds to number of retransmits */
2635 icsk->icsk_accept_queue.rskq_defer_accept =
2636 secs_to_retrans(val, TCP_TIMEOUT_INIT / HZ,
2637 TCP_RTO_MAX / HZ);
2638 break;
2639
2640 case TCP_WINDOW_CLAMP:
2641 if (!val) {
2642 if (sk->sk_state != TCP_CLOSE) {
2643 err = -EINVAL;
2644 break;
2645 }
2646 tp->window_clamp = 0;
2647 } else
2648 tp->window_clamp = val < SOCK_MIN_RCVBUF / 2 ?
2649 SOCK_MIN_RCVBUF / 2 : val;
2650 break;
2651
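	/* TCP_QUICKACK: zero re-enables delayed-ACK (pingpong) mode. A
	 * non-zero value disables it and, if an ACK is already scheduled on
	 * an established connection, flushes it immediately; an even value
	 * then switches back to pingpong mode, making the quick ACK one-shot.
	 */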
2652 case TCP_QUICKACK:
2653 if (!val) {
2654 icsk->icsk_ack.pingpong = 1;
2655 } else {
2656 icsk->icsk_ack.pingpong = 0;
2657 if ((1 << sk->sk_state) &
2658 (TCPF_ESTABLISHED | TCPF_CLOSE_WAIT) &&
2659 inet_csk_ack_scheduled(sk)) {
2660 icsk->icsk_ack.pending |= ICSK_ACK_PUSHED;
2661 tcp_cleanup_rbuf(sk, 1);
2662 if (!(val & 1))
2663 icsk->icsk_ack.pingpong = 1;
2664 }
2665 }
2666 break;
2667
2668#ifdef CONFIG_TCP_MD5SIG
2669 case TCP_MD5SIG:
2670 /* Read the IP->Key mappings from userspace */
2671 err = tp->af_specific->md5_parse(sk, optval, optlen);
2672 break;
2673#endif
2674 case TCP_USER_TIMEOUT:
2675		/* Cap the maximum time (in ms) that TCP will keep retrying or
2676		 * retransmitting before giving up and aborting (ETIMEDOUT) a connection.
2677 */
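		/* Illustrative userspace usage (not part of this file; fd and
		 * tmo_ms are placeholders): abort the connection if transmitted
		 * data remains unacknowledged for more than 30 seconds:
		 *
		 *	unsigned int tmo_ms = 30000;
		 *
		 *	setsockopt(fd, IPPROTO_TCP, TCP_USER_TIMEOUT,
		 *		   &tmo_ms, sizeof(tmo_ms));
		 */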
2678 if (val < 0)
2679 err = -EINVAL;
2680 else
2681 icsk->icsk_user_timeout = msecs_to_jiffies(val);
2682 break;
2683
2684 case TCP_FASTOPEN:
2685 if (val >= 0 && ((1 << sk->sk_state) & (TCPF_CLOSE |
2686 TCPF_LISTEN)))
2687 err = fastopen_init_queue(sk, val);
2688 else
2689 err = -EINVAL;
2690 break;
2691 case TCP_TIMESTAMP:
2692 if (!tp->repair)
2693 err = -EPERM;
2694 else
2695 tp->tsoffset = val - tcp_time_stamp;
2696 break;
2697 case TCP_NOTSENT_LOWAT:
2698 tp->notsent_lowat = val;
2699 sk->sk_write_space(sk);
2700 break;
2701 default:
2702 err = -ENOPROTOOPT;
2703 break;
2704 }
2705
2706 release_sock(sk);
2707 return err;
2708}
2709
2710int tcp_setsockopt(struct sock *sk, int level, int optname, char __user *optval,
2711 unsigned int optlen)
2712{
2713 const struct inet_connection_sock *icsk = inet_csk(sk);
2714
2715 if (level != SOL_TCP)
2716 return icsk->icsk_af_ops->setsockopt(sk, level, optname,
2717 optval, optlen);
2718 return do_tcp_setsockopt(sk, level, optname, optval, optlen);
2719}
2720EXPORT_SYMBOL(tcp_setsockopt);
2721
2722#ifdef CONFIG_COMPAT
2723int compat_tcp_setsockopt(struct sock *sk, int level, int optname,
2724 char __user *optval, unsigned int optlen)
2725{
2726 if (level != SOL_TCP)
2727 return inet_csk_compat_setsockopt(sk, level, optname,
2728 optval, optlen);
2729 return do_tcp_setsockopt(sk, level, optname, optval, optlen);
2730}
2731EXPORT_SYMBOL(compat_tcp_setsockopt);
2732#endif
2733
2734/* Return information about state of tcp endpoint in API format. */
2735void tcp_get_info(const struct sock *sk, struct tcp_info *info)
2736{
2737 const struct tcp_sock *tp = tcp_sk(sk);
2738 const struct inet_connection_sock *icsk = inet_csk(sk);
2739 u32 now = tcp_time_stamp;
2740
2741 memset(info, 0, sizeof(*info));
2742
2743 info->tcpi_state = sk->sk_state;
2744 info->tcpi_ca_state = icsk->icsk_ca_state;
2745 info->tcpi_retransmits = icsk->icsk_retransmits;
2746 info->tcpi_probes = icsk->icsk_probes_out;
2747 info->tcpi_backoff = icsk->icsk_backoff;
2748
2749 if (tp->rx_opt.tstamp_ok)
2750 info->tcpi_options |= TCPI_OPT_TIMESTAMPS;
2751 if (tcp_is_sack(tp))
2752 info->tcpi_options |= TCPI_OPT_SACK;
2753 if (tp->rx_opt.wscale_ok) {
2754 info->tcpi_options |= TCPI_OPT_WSCALE;
2755 info->tcpi_snd_wscale = tp->rx_opt.snd_wscale;
2756 info->tcpi_rcv_wscale = tp->rx_opt.rcv_wscale;
2757 }
2758
2759 if (tp->ecn_flags & TCP_ECN_OK)
2760 info->tcpi_options |= TCPI_OPT_ECN;
2761 if (tp->ecn_flags & TCP_ECN_SEEN)
2762 info->tcpi_options |= TCPI_OPT_ECN_SEEN;
2763 if (tp->syn_data_acked)
2764 info->tcpi_options |= TCPI_OPT_SYN_DATA;
2765
2766 info->tcpi_rto = jiffies_to_usecs(icsk->icsk_rto);
2767 info->tcpi_ato = jiffies_to_usecs(icsk->icsk_ack.ato);
2768 info->tcpi_snd_mss = tp->mss_cache;
2769 info->tcpi_rcv_mss = icsk->icsk_ack.rcv_mss;
2770
2771 if (sk->sk_state == TCP_LISTEN) {
2772 info->tcpi_unacked = sk->sk_ack_backlog;
2773 info->tcpi_sacked = sk->sk_max_ack_backlog;
2774 } else {
2775 info->tcpi_unacked = tp->packets_out;
2776 info->tcpi_sacked = tp->sacked_out;
2777 }
2778 info->tcpi_lost = tp->lost_out;
2779 info->tcpi_retrans = tp->retrans_out;
2780 info->tcpi_fackets = tp->fackets_out;
2781
2782 info->tcpi_last_data_sent = jiffies_to_msecs(now - tp->lsndtime);
2783 info->tcpi_last_data_recv = jiffies_to_msecs(now - icsk->icsk_ack.lrcvtime);
2784 info->tcpi_last_ack_recv = jiffies_to_msecs(now - tp->rcv_tstamp);
2785
2786 info->tcpi_pmtu = icsk->icsk_pmtu_cookie;
2787 info->tcpi_rcv_ssthresh = tp->rcv_ssthresh;
2788 info->tcpi_rtt = tp->srtt_us >> 3;
2789 info->tcpi_rttvar = tp->mdev_us >> 2;
2790 info->tcpi_snd_ssthresh = tp->snd_ssthresh;
2791 info->tcpi_snd_cwnd = tp->snd_cwnd;
2792 info->tcpi_advmss = tp->advmss;
2793 info->tcpi_reordering = tp->reordering;
2794
2795 info->tcpi_rcv_rtt = jiffies_to_usecs(tp->rcv_rtt_est.rtt)>>3;
2796 info->tcpi_rcv_space = tp->rcvq_space.space;
2797
2798 info->tcpi_total_retrans = tp->total_retrans;
2799
2800 info->tcpi_pacing_rate = sk->sk_pacing_rate != ~0U ?
2801 sk->sk_pacing_rate : ~0ULL;
2802 info->tcpi_max_pacing_rate = sk->sk_max_pacing_rate != ~0U ?
2803 sk->sk_max_pacing_rate : ~0ULL;
2804}
2805EXPORT_SYMBOL_GPL(tcp_get_info);
2806
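/* Read back a single TCP-level socket option. Plain integer options fall
 * through to the common copy-out at the bottom; variable-length options
 * (TCP_INFO, TCP_CONGESTION) copy their own payload and return directly.
 */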
2807static int do_tcp_getsockopt(struct sock *sk, int level,
2808 int optname, char __user *optval, int __user *optlen)
2809{
2810 struct inet_connection_sock *icsk = inet_csk(sk);
2811 struct tcp_sock *tp = tcp_sk(sk);
2812 int val, len;
2813
2814 if (get_user(len, optlen))
2815 return -EFAULT;
2816
2817 len = min_t(unsigned int, len, sizeof(int));
2818
2819 if (len < 0)
2820 return -EINVAL;
2821
2822 switch (optname) {
2823 case TCP_MAXSEG:
2824 val = tp->mss_cache;
2825 if (!val && ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN)))
2826 val = tp->rx_opt.user_mss;
2827 if (tp->repair)
2828 val = tp->rx_opt.mss_clamp;
2829 break;
2830 case TCP_NODELAY:
2831 val = !!(tp->nonagle&TCP_NAGLE_OFF);
2832 break;
2833 case TCP_CORK:
2834 val = !!(tp->nonagle&TCP_NAGLE_CORK);
2835 break;
2836 case TCP_KEEPIDLE:
2837 val = keepalive_time_when(tp) / HZ;
2838 break;
2839 case TCP_KEEPINTVL:
2840 val = keepalive_intvl_when(tp) / HZ;
2841 break;
2842 case TCP_KEEPCNT:
2843 val = keepalive_probes(tp);
2844 break;
2845 case TCP_SYNCNT:
2846 val = icsk->icsk_syn_retries ? : sysctl_tcp_syn_retries;
2847 break;
2848 case TCP_LINGER2:
2849 val = tp->linger2;
2850 if (val >= 0)
2851 val = (val ? : sysctl_tcp_fin_timeout) / HZ;
2852 break;
2853 case TCP_DEFER_ACCEPT:
2854 val = retrans_to_secs(icsk->icsk_accept_queue.rskq_defer_accept,
2855 TCP_TIMEOUT_INIT / HZ, TCP_RTO_MAX / HZ);
2856 break;
2857 case TCP_WINDOW_CLAMP:
2858 val = tp->window_clamp;
2859 break;
2860 case TCP_INFO: {
2861 struct tcp_info info;
2862
2863 if (get_user(len, optlen))
2864 return -EFAULT;
2865
2866 tcp_get_info(sk, &info);
2867
2868 len = min_t(unsigned int, len, sizeof(info));
2869 if (put_user(len, optlen))
2870 return -EFAULT;
2871 if (copy_to_user(optval, &info, len))
2872 return -EFAULT;
2873 return 0;
2874 }
2875 case TCP_QUICKACK:
2876 val = !icsk->icsk_ack.pingpong;
2877 break;
2878
2879 case TCP_CONGESTION:
2880 if (get_user(len, optlen))
2881 return -EFAULT;
2882 len = min_t(unsigned int, len, TCP_CA_NAME_MAX);
2883 if (put_user(len, optlen))
2884 return -EFAULT;
2885 if (copy_to_user(optval, icsk->icsk_ca_ops->name, len))
2886 return -EFAULT;
2887 return 0;
2888
2889 case TCP_THIN_LINEAR_TIMEOUTS:
2890 val = tp->thin_lto;
2891 break;
2892 case TCP_THIN_DUPACK:
2893 val = tp->thin_dupack;
2894 break;
2895
2896 case TCP_REPAIR:
2897 val = tp->repair;
2898 break;
2899
2900 case TCP_REPAIR_QUEUE:
2901 if (tp->repair)
2902 val = tp->repair_queue;
2903 else
2904 return -EINVAL;
2905 break;
2906
2907 case TCP_QUEUE_SEQ:
2908 if (tp->repair_queue == TCP_SEND_QUEUE)
2909 val = tp->write_seq;
2910 else if (tp->repair_queue == TCP_RECV_QUEUE)
2911 val = tp->rcv_nxt;
2912 else
2913 return -EINVAL;
2914 break;
2915
2916 case TCP_USER_TIMEOUT:
2917 val = jiffies_to_msecs(icsk->icsk_user_timeout);
2918 break;
2919 case TCP_TIMESTAMP:
2920 val = tcp_time_stamp + tp->tsoffset;
2921 break;
2922 case TCP_NOTSENT_LOWAT:
2923 val = tp->notsent_lowat;
2924 break;
2925 default:
2926 return -ENOPROTOOPT;
2927 }
2928
2929 if (put_user(len, optlen))
2930 return -EFAULT;
2931 if (copy_to_user(optval, &val, len))
2932 return -EFAULT;
2933 return 0;
2934}
2935
2936int tcp_getsockopt(struct sock *sk, int level, int optname, char __user *optval,
2937 int __user *optlen)
2938{
2939 struct inet_connection_sock *icsk = inet_csk(sk);
2940
2941 if (level != SOL_TCP)
2942 return icsk->icsk_af_ops->getsockopt(sk, level, optname,
2943 optval, optlen);
2944 return do_tcp_getsockopt(sk, level, optname, optval, optlen);
2945}
2946EXPORT_SYMBOL(tcp_getsockopt);
2947
2948#ifdef CONFIG_COMPAT
2949int compat_tcp_getsockopt(struct sock *sk, int level, int optname,
2950 char __user *optval, int __user *optlen)
2951{
2952 if (level != SOL_TCP)
2953 return inet_csk_compat_getsockopt(sk, level, optname,
2954 optval, optlen);
2955 return do_tcp_getsockopt(sk, level, optname, optval, optlen);
2956}
2957EXPORT_SYMBOL(compat_tcp_getsockopt);
2958#endif
2959
2960#ifdef CONFIG_TCP_MD5SIG
2961static struct tcp_md5sig_pool __percpu *tcp_md5sig_pool __read_mostly;
2962static DEFINE_MUTEX(tcp_md5sig_mutex);
2963
2964static void __tcp_free_md5sig_pool(struct tcp_md5sig_pool __percpu *pool)
2965{
2966 int cpu;
2967
2968 for_each_possible_cpu(cpu) {
2969 struct tcp_md5sig_pool *p = per_cpu_ptr(pool, cpu);
2970
2971 if (p->md5_desc.tfm)
2972 crypto_free_hash(p->md5_desc.tfm);
2973 }
2974 free_percpu(pool);
2975}
2976
2977static void __tcp_alloc_md5sig_pool(void)
2978{
2979 int cpu;
2980 struct tcp_md5sig_pool __percpu *pool;
2981
2982 pool = alloc_percpu(struct tcp_md5sig_pool);
2983 if (!pool)
2984 return;
2985
2986 for_each_possible_cpu(cpu) {
2987 struct crypto_hash *hash;
2988
2989 hash = crypto_alloc_hash("md5", 0, CRYPTO_ALG_ASYNC);
2990 if (IS_ERR_OR_NULL(hash))
2991 goto out_free;
2992
2993 per_cpu_ptr(pool, cpu)->md5_desc.tfm = hash;
2994 }
2995 /* before setting tcp_md5sig_pool, we must commit all writes
2996 * to memory. See ACCESS_ONCE() in tcp_get_md5sig_pool()
2997 */
2998 smp_wmb();
2999 tcp_md5sig_pool = pool;
3000 return;
3001out_free:
3002 __tcp_free_md5sig_pool(pool);
3003}
3004
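/* Lazily allocate the per-cpu MD5 transform pool the first time an MD5 key
 * is installed. The mutex plus re-check prevents concurrent callers from
 * allocating twice; once published, the pool is never freed.
 */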
3005bool tcp_alloc_md5sig_pool(void)
3006{
3007 if (unlikely(!tcp_md5sig_pool)) {
3008 mutex_lock(&tcp_md5sig_mutex);
3009
3010 if (!tcp_md5sig_pool)
3011 __tcp_alloc_md5sig_pool();
3012
3013 mutex_unlock(&tcp_md5sig_mutex);
3014 }
3015 return tcp_md5sig_pool != NULL;
3016}
3017EXPORT_SYMBOL(tcp_alloc_md5sig_pool);
3018
3019
3020/**
3021 * tcp_get_md5sig_pool - get md5sig_pool for this user
3022 *
3023 * We use a per-cpu structure, so if we succeed, we exit with preemption
3024 * and BH disabled, to make sure another thread or a softirq handler
3025 * won't try to get the same context.
3026 */
3027struct tcp_md5sig_pool *tcp_get_md5sig_pool(void)
3028{
3029 struct tcp_md5sig_pool __percpu *p;
3030
3031 local_bh_disable();
3032 p = ACCESS_ONCE(tcp_md5sig_pool);
3033 if (p)
3034 return __this_cpu_ptr(p);
3035
3036 local_bh_enable();
3037 return NULL;
3038}
3039EXPORT_SYMBOL(tcp_get_md5sig_pool);
3040
3041int tcp_md5_hash_header(struct tcp_md5sig_pool *hp,
3042 const struct tcphdr *th)
3043{
3044 struct scatterlist sg;
3045 struct tcphdr hdr;
3046 int err;
3047
3048 /* We are not allowed to change tcphdr, make a local copy */
3049 memcpy(&hdr, th, sizeof(hdr));
3050 hdr.check = 0;
3051
3052 /* options aren't included in the hash */
3053 sg_init_one(&sg, &hdr, sizeof(hdr));
3054 err = crypto_hash_update(&hp->md5_desc, &sg, sizeof(hdr));
3055 return err;
3056}
3057EXPORT_SYMBOL(tcp_md5_hash_header);
3058
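/* Feed the skb payload into the MD5 hash: the linear head beyond
 * header_len, every page fragment, and (recursively) any skbs hanging off
 * the frag_list.
 */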
3059int tcp_md5_hash_skb_data(struct tcp_md5sig_pool *hp,
3060 const struct sk_buff *skb, unsigned int header_len)
3061{
3062 struct scatterlist sg;
3063 const struct tcphdr *tp = tcp_hdr(skb);
3064 struct hash_desc *desc = &hp->md5_desc;
3065 unsigned int i;
3066 const unsigned int head_data_len = skb_headlen(skb) > header_len ?
3067 skb_headlen(skb) - header_len : 0;
3068 const struct skb_shared_info *shi = skb_shinfo(skb);
3069 struct sk_buff *frag_iter;
3070
3071 sg_init_table(&sg, 1);
3072
3073 sg_set_buf(&sg, ((u8 *) tp) + header_len, head_data_len);
3074 if (crypto_hash_update(desc, &sg, head_data_len))
3075 return 1;
3076
3077 for (i = 0; i < shi->nr_frags; ++i) {
3078 const struct skb_frag_struct *f = &shi->frags[i];
3079 unsigned int offset = f->page_offset;
3080 struct page *page = skb_frag_page(f) + (offset >> PAGE_SHIFT);
3081
3082 sg_set_page(&sg, page, skb_frag_size(f),
3083 offset_in_page(offset));
3084 if (crypto_hash_update(desc, &sg, skb_frag_size(f)))
3085 return 1;
3086 }
3087
3088 skb_walk_frags(skb, frag_iter)
3089 if (tcp_md5_hash_skb_data(hp, frag_iter, 0))
3090 return 1;
3091
3092 return 0;
3093}
3094EXPORT_SYMBOL(tcp_md5_hash_skb_data);
3095
3096int tcp_md5_hash_key(struct tcp_md5sig_pool *hp, const struct tcp_md5sig_key *key)
3097{
3098 struct scatterlist sg;
3099
3100 sg_init_one(&sg, key->key, key->keylen);
3101 return crypto_hash_update(&hp->md5_desc, &sg, key->keylen);
3102}
3103EXPORT_SYMBOL(tcp_md5_hash_key);
3104
3105#endif
3106
3107void tcp_done(struct sock *sk)
3108{
3109 struct request_sock *req = tcp_sk(sk)->fastopen_rsk;
3110
3111 if (sk->sk_state == TCP_SYN_SENT || sk->sk_state == TCP_SYN_RECV)
3112 TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_ATTEMPTFAILS);
3113
3114 tcp_set_state(sk, TCP_CLOSE);
3115 tcp_clear_xmit_timers(sk);
3116 if (req != NULL)
3117 reqsk_fastopen_remove(sk, req, false);
3118
3119 sk->sk_shutdown = SHUTDOWN_MASK;
3120
3121 if (!sock_flag(sk, SOCK_DEAD))
3122 sk->sk_state_change(sk);
3123 else
3124 inet_csk_destroy_sock(sk);
3125}
3126EXPORT_SYMBOL_GPL(tcp_done);
3127
3128extern struct tcp_congestion_ops tcp_reno;
3129
3130static __initdata unsigned long thash_entries;
3131static int __init set_thash_entries(char *str)
3132{
3133 ssize_t ret;
3134
3135 if (!str)
3136 return 0;
3137
3138 ret = kstrtoul(str, 0, &thash_entries);
3139 if (ret)
3140 return 0;
3141
3142 return 1;
3143}
3144__setup("thash_entries=", set_thash_entries);
3145
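/* Derive the default tcp_mem[] thresholds (in pages) from available buffer
 * memory: the pressure point is roughly 1/8 of the free pages, the low
 * watermark is 3/4 of that, and the hard limit is twice the low watermark.
 */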
3146static void tcp_init_mem(void)
3147{
3148 unsigned long limit = nr_free_buffer_pages() / 8;
3149 limit = max(limit, 128UL);
3150 sysctl_tcp_mem[0] = limit / 4 * 3;
3151 sysctl_tcp_mem[1] = limit;
3152 sysctl_tcp_mem[2] = sysctl_tcp_mem[0] * 2;
3153}
3154
3155void __init tcp_init(void)
3156{
3157 struct sk_buff *skb = NULL;
3158 unsigned long limit;
3159 int max_rshare, max_wshare, cnt;
3160 unsigned int i;
3161
3162 BUILD_BUG_ON(sizeof(struct tcp_skb_cb) > sizeof(skb->cb));
3163
3164 percpu_counter_init(&tcp_sockets_allocated, 0);
3165 percpu_counter_init(&tcp_orphan_count, 0);
3166 tcp_hashinfo.bind_bucket_cachep =
3167 kmem_cache_create("tcp_bind_bucket",
3168 sizeof(struct inet_bind_bucket), 0,
3169 SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
3170
3171 /* Size and allocate the main established and bind bucket
3172 * hash tables.
3173 *
3174 * The methodology is similar to that of the buffer cache.
3175 */
3176 tcp_hashinfo.ehash =
3177 alloc_large_system_hash("TCP established",
3178 sizeof(struct inet_ehash_bucket),
3179 thash_entries,
3180 17, /* one slot per 128 KB of memory */
3181 0,
3182 NULL,
3183 &tcp_hashinfo.ehash_mask,
3184 0,
3185 thash_entries ? 0 : 512 * 1024);
3186 for (i = 0; i <= tcp_hashinfo.ehash_mask; i++)
3187 INIT_HLIST_NULLS_HEAD(&tcp_hashinfo.ehash[i].chain, i);
3188
3189 if (inet_ehash_locks_alloc(&tcp_hashinfo))
3190 panic("TCP: failed to alloc ehash_locks");
3191 tcp_hashinfo.bhash =
3192 alloc_large_system_hash("TCP bind",
3193 sizeof(struct inet_bind_hashbucket),
3194 tcp_hashinfo.ehash_mask + 1,
3195 17, /* one slot per 128 KB of memory */
3196 0,
3197 &tcp_hashinfo.bhash_size,
3198 NULL,
3199 0,
3200 64 * 1024);
3201 tcp_hashinfo.bhash_size = 1U << tcp_hashinfo.bhash_size;
3202 for (i = 0; i < tcp_hashinfo.bhash_size; i++) {
3203 spin_lock_init(&tcp_hashinfo.bhash[i].lock);
3204 INIT_HLIST_HEAD(&tcp_hashinfo.bhash[i].chain);
3205 }
3206
3207
3208 cnt = tcp_hashinfo.ehash_mask + 1;
3209
3210 tcp_death_row.sysctl_max_tw_buckets = cnt / 2;
3211 sysctl_tcp_max_orphans = cnt / 2;
3212 sysctl_max_syn_backlog = max(128, cnt / 256);
3213
3214 tcp_init_mem();
3215 /* Set per-socket limits to no more than 1/128 the pressure threshold */
3216 limit = nr_free_buffer_pages() << (PAGE_SHIFT - 7);
3217 max_wshare = min(4UL*1024*1024, limit);
3218 max_rshare = min(6UL*1024*1024, limit);
3219
3220 sysctl_tcp_wmem[0] = SK_MEM_QUANTUM;
3221 sysctl_tcp_wmem[1] = 16*1024;
3222 sysctl_tcp_wmem[2] = max(64*1024, max_wshare);
3223
3224 sysctl_tcp_rmem[0] = SK_MEM_QUANTUM;
3225 sysctl_tcp_rmem[1] = 87380;
3226 sysctl_tcp_rmem[2] = max(87380, max_rshare);
3227
3228 pr_info("Hash tables configured (established %u bind %u)\n",
3229 tcp_hashinfo.ehash_mask + 1, tcp_hashinfo.bhash_size);
3230
3231 tcp_metrics_init();
3232
3233 tcp_register_congestion_control(&tcp_reno);
3234
3235 tcp_tasklet_init();
3236}
243
244#define pr_fmt(fmt) "TCP: " fmt
245
246#include <crypto/hash.h>
247#include <linux/kernel.h>
248#include <linux/module.h>
249#include <linux/types.h>
250#include <linux/fcntl.h>
251#include <linux/poll.h>
252#include <linux/inet_diag.h>
253#include <linux/init.h>
254#include <linux/fs.h>
255#include <linux/skbuff.h>
256#include <linux/scatterlist.h>
257#include <linux/splice.h>
258#include <linux/net.h>
259#include <linux/socket.h>
260#include <linux/random.h>
261#include <linux/memblock.h>
262#include <linux/highmem.h>
263#include <linux/cache.h>
264#include <linux/err.h>
265#include <linux/time.h>
266#include <linux/slab.h>
267#include <linux/errqueue.h>
268#include <linux/static_key.h>
269#include <linux/btf.h>
270
271#include <net/icmp.h>
272#include <net/inet_common.h>
273#include <net/tcp.h>
274#include <net/mptcp.h>
275#include <net/proto_memory.h>
276#include <net/xfrm.h>
277#include <net/ip.h>
278#include <net/sock.h>
279#include <net/rstreason.h>
280
281#include <linux/uaccess.h>
282#include <asm/ioctls.h>
283#include <net/busy_poll.h>
284#include <net/hotdata.h>
285#include <trace/events/tcp.h>
286#include <net/rps.h>
287
288#include "../core/devmem.h"
289
290/* Track pending CMSGs. */
291enum {
292 TCP_CMSG_INQ = 1,
293 TCP_CMSG_TS = 2
294};
295
296DEFINE_PER_CPU(unsigned int, tcp_orphan_count);
297EXPORT_PER_CPU_SYMBOL_GPL(tcp_orphan_count);
298
299DEFINE_PER_CPU(u32, tcp_tw_isn);
300EXPORT_PER_CPU_SYMBOL_GPL(tcp_tw_isn);
301
302long sysctl_tcp_mem[3] __read_mostly;
303EXPORT_SYMBOL(sysctl_tcp_mem);
304
305atomic_long_t tcp_memory_allocated ____cacheline_aligned_in_smp; /* Current allocated memory. */
306EXPORT_SYMBOL(tcp_memory_allocated);
307DEFINE_PER_CPU(int, tcp_memory_per_cpu_fw_alloc);
308EXPORT_PER_CPU_SYMBOL_GPL(tcp_memory_per_cpu_fw_alloc);
309
310#if IS_ENABLED(CONFIG_SMC)
311DEFINE_STATIC_KEY_FALSE(tcp_have_smc);
312EXPORT_SYMBOL(tcp_have_smc);
313#endif
314
315/*
316 * Current number of TCP sockets.
317 */
318struct percpu_counter tcp_sockets_allocated ____cacheline_aligned_in_smp;
319EXPORT_SYMBOL(tcp_sockets_allocated);
320
321/*
322 * TCP splice context
323 */
324struct tcp_splice_state {
325 struct pipe_inode_info *pipe;
326 size_t len;
327 unsigned int flags;
328};
329
330/*
331 * Pressure flag: try to collapse.
332 * Technical note: it is used by multiple contexts non-atomically.
333 * All the __sk_mem_schedule() is of this nature: accounting
334 * is strict, actions are advisory and have some latency.
335 */
336unsigned long tcp_memory_pressure __read_mostly;
337EXPORT_SYMBOL_GPL(tcp_memory_pressure);
338
339void tcp_enter_memory_pressure(struct sock *sk)
340{
341 unsigned long val;
342
343 if (READ_ONCE(tcp_memory_pressure))
344 return;
345 val = jiffies;
346
347 if (!val)
348 val--;
349 if (!cmpxchg(&tcp_memory_pressure, 0, val))
350 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMEMORYPRESSURES);
351}
352EXPORT_SYMBOL_GPL(tcp_enter_memory_pressure);
353
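/* Clear the global pressure flag and record (in milliseconds) how long the
 * stack spent under memory pressure, based on the timestamp stored when
 * pressure was entered.
 */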
354void tcp_leave_memory_pressure(struct sock *sk)
355{
356 unsigned long val;
357
358 if (!READ_ONCE(tcp_memory_pressure))
359 return;
360 val = xchg(&tcp_memory_pressure, 0);
361 if (val)
362 NET_ADD_STATS(sock_net(sk), LINUX_MIB_TCPMEMORYPRESSURESCHRONO,
363 jiffies_to_msecs(jiffies - val));
364}
365EXPORT_SYMBOL_GPL(tcp_leave_memory_pressure);
366
367/* Convert seconds to retransmits based on initial and max timeout */
368static u8 secs_to_retrans(int seconds, int timeout, int rto_max)
369{
370 u8 res = 0;
371
372 if (seconds > 0) {
373 int period = timeout;
374
375 res = 1;
376 while (seconds > period && res < 255) {
377 res++;
378 timeout <<= 1;
379 if (timeout > rto_max)
380 timeout = rto_max;
381 period += timeout;
382 }
383 }
384 return res;
385}
386
387/* Convert retransmits to seconds based on initial and max timeout */
388static int retrans_to_secs(u8 retrans, int timeout, int rto_max)
389{
390 int period = 0;
391
392 if (retrans > 0) {
393 period = timeout;
394 while (--retrans) {
395 timeout <<= 1;
396 if (timeout > rto_max)
397 timeout = rto_max;
398 period += timeout;
399 }
400 }
401 return period;
402}
403
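/* Convert the most recent (packets delivered, interval) sample into a
 * delivery rate in bytes per second, using the current MSS as the
 * per-packet size estimate.
 */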
404static u64 tcp_compute_delivery_rate(const struct tcp_sock *tp)
405{
406 u32 rate = READ_ONCE(tp->rate_delivered);
407 u32 intv = READ_ONCE(tp->rate_interval_us);
408 u64 rate64 = 0;
409
410 if (rate && intv) {
411 rate64 = (u64)rate * tp->mss_cache * USEC_PER_SEC;
412 do_div(rate64, intv);
413 }
414 return rate64;
415}
416
417/* Address-family independent initialization for a tcp_sock.
418 *
419 * NOTE: A lot of things are set to zero explicitly by the call to
420 * sk_alloc(), so they need not be done here.
421 */
422void tcp_init_sock(struct sock *sk)
423{
424 struct inet_connection_sock *icsk = inet_csk(sk);
425 struct tcp_sock *tp = tcp_sk(sk);
426 int rto_min_us;
427
428 tp->out_of_order_queue = RB_ROOT;
429 sk->tcp_rtx_queue = RB_ROOT;
430 tcp_init_xmit_timers(sk);
431 INIT_LIST_HEAD(&tp->tsq_node);
432 INIT_LIST_HEAD(&tp->tsorted_sent_queue);
433
434 icsk->icsk_rto = TCP_TIMEOUT_INIT;
435 rto_min_us = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_rto_min_us);
436 icsk->icsk_rto_min = usecs_to_jiffies(rto_min_us);
437 icsk->icsk_delack_max = TCP_DELACK_MAX;
438 tp->mdev_us = jiffies_to_usecs(TCP_TIMEOUT_INIT);
439 minmax_reset(&tp->rtt_min, tcp_jiffies32, ~0U);
440
441 /* So many TCP implementations out there (incorrectly) count the
442 * initial SYN frame in their delayed-ACK and congestion control
443 * algorithms that we must have the following bandaid to talk
444 * efficiently to them. -DaveM
445 */
446 tcp_snd_cwnd_set(tp, TCP_INIT_CWND);
447
448 /* There's a bubble in the pipe until at least the first ACK. */
449 tp->app_limited = ~0U;
450 tp->rate_app_limited = 1;
451
452 /* See draft-stevens-tcpca-spec-01 for discussion of the
453 * initialization of these values.
454 */
455 tp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
456 tp->snd_cwnd_clamp = ~0;
457 tp->mss_cache = TCP_MSS_DEFAULT;
458
459 tp->reordering = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_reordering);
460 tcp_assign_congestion_control(sk);
461
462 tp->tsoffset = 0;
463 tp->rack.reo_wnd_steps = 1;
464
465 sk->sk_write_space = sk_stream_write_space;
466 sock_set_flag(sk, SOCK_USE_WRITE_QUEUE);
467
468 icsk->icsk_sync_mss = tcp_sync_mss;
469
470 WRITE_ONCE(sk->sk_sndbuf, READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_wmem[1]));
471 WRITE_ONCE(sk->sk_rcvbuf, READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_rmem[1]));
472 tcp_scaling_ratio_init(sk);
473
474 set_bit(SOCK_SUPPORT_ZC, &sk->sk_socket->flags);
475 sk_sockets_allocated_inc(sk);
476 xa_init_flags(&sk->sk_user_frags, XA_FLAGS_ALLOC1);
477}
478EXPORT_SYMBOL(tcp_init_sock);
479
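/* Propagate timestamping requests from the sendmsg() control message onto
 * the last skb queued for transmit, using the sequence number of the final
 * byte as the timestamp key when a TX record was requested.
 */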
480static void tcp_tx_timestamp(struct sock *sk, struct sockcm_cookie *sockc)
481{
482 struct sk_buff *skb = tcp_write_queue_tail(sk);
483 u32 tsflags = sockc->tsflags;
484
485 if (tsflags && skb) {
486 struct skb_shared_info *shinfo = skb_shinfo(skb);
487 struct tcp_skb_cb *tcb = TCP_SKB_CB(skb);
488
489 sock_tx_timestamp(sk, sockc, &shinfo->tx_flags);
490 if (tsflags & SOF_TIMESTAMPING_TX_ACK)
491 tcb->txstamp_ack = 1;
492 if (tsflags & SOF_TIMESTAMPING_TX_RECORD_MASK)
493 shinfo->tskey = TCP_SKB_CB(skb)->seq + skb->len - 1;
494 }
495}
496
497static bool tcp_stream_is_readable(struct sock *sk, int target)
498{
499 if (tcp_epollin_ready(sk, target))
500 return true;
501 return sk_is_readable(sk);
502}
503
504/*
505 * Wait for a TCP event.
506 *
507 * Note that we don't need to lock the socket, as the upper poll layers
508 * take care of normal races (between the test and the event) and we don't
509 * go look at any of the socket buffers directly.
510 */
511__poll_t tcp_poll(struct file *file, struct socket *sock, poll_table *wait)
512{
513 __poll_t mask;
514 struct sock *sk = sock->sk;
515 const struct tcp_sock *tp = tcp_sk(sk);
516 u8 shutdown;
517 int state;
518
519 sock_poll_wait(file, sock, wait);
520
521 state = inet_sk_state_load(sk);
522 if (state == TCP_LISTEN)
523 return inet_csk_listen_poll(sk);
524
525	/* Socket is not locked. We are protected from async events
526	 * by the poll logic, and correct handling of state changes
527	 * made by other threads is impossible here in any case.
528 */
529
530 mask = 0;
531
532 /*
533 * EPOLLHUP is certainly not done right. But poll() doesn't
534 * have a notion of HUP in just one direction, and for a
535 * socket the read side is more interesting.
536 *
537 * Some poll() documentation says that EPOLLHUP is incompatible
538	 * with the EPOLLOUT/POLLWR flags, so somebody should check all
539	 * of this. But be careful: it tends to be safer to return too many
540 * bits than too few, and you can easily break real applications
541 * if you don't tell them that something has hung up!
542 *
543 * Check-me.
544 *
545	 * Check number 1. EPOLLHUP is an _UNMASKABLE_ event (see UNIX98 and
546	 * our fs/select.c). It means that after we have received EOF,
547	 * poll always returns immediately, making poll() on write()
548	 * impossible in state CLOSE_WAIT. One evident solution is to set
549	 * EPOLLHUP if and only if shutdown has been made in both directions.
550	 * Actually, it is interesting to look at how Solaris and DUX
551	 * solve this dilemma. If EPOLLHUP were maskable, I would prefer
552	 * to set it on SND_SHUTDOWN. BTW the examples given
553	 * in Stevens' books assume exactly this behaviour, which explains
554	 * why EPOLLHUP is incompatible with EPOLLOUT. --ANK
555 *
556 * NOTE. Check for TCP_CLOSE is added. The goal is to prevent
557 * blocking on fresh not-connected or disconnected socket. --ANK
558 */
559 shutdown = READ_ONCE(sk->sk_shutdown);
560 if (shutdown == SHUTDOWN_MASK || state == TCP_CLOSE)
561 mask |= EPOLLHUP;
562 if (shutdown & RCV_SHUTDOWN)
563 mask |= EPOLLIN | EPOLLRDNORM | EPOLLRDHUP;
564
565 /* Connected or passive Fast Open socket? */
566 if (state != TCP_SYN_SENT &&
567 (state != TCP_SYN_RECV || rcu_access_pointer(tp->fastopen_rsk))) {
568 int target = sock_rcvlowat(sk, 0, INT_MAX);
569 u16 urg_data = READ_ONCE(tp->urg_data);
570
571 if (unlikely(urg_data) &&
572 READ_ONCE(tp->urg_seq) == READ_ONCE(tp->copied_seq) &&
573 !sock_flag(sk, SOCK_URGINLINE))
574 target++;
575
576 if (tcp_stream_is_readable(sk, target))
577 mask |= EPOLLIN | EPOLLRDNORM;
578
579 if (!(shutdown & SEND_SHUTDOWN)) {
580 if (__sk_stream_is_writeable(sk, 1)) {
581 mask |= EPOLLOUT | EPOLLWRNORM;
582 } else { /* send SIGIO later */
583 sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);
584 set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
585
586 /* Race breaker. If space is freed after the
587 * wspace test but before the flags are set,
588 * the IO signal will be lost. The memory
589 * barrier pairs with the one on the input side.
590 */
591 smp_mb__after_atomic();
592 if (__sk_stream_is_writeable(sk, 1))
593 mask |= EPOLLOUT | EPOLLWRNORM;
594 }
595 } else
596 mask |= EPOLLOUT | EPOLLWRNORM;
597
598 if (urg_data & TCP_URG_VALID)
599 mask |= EPOLLPRI;
600 } else if (state == TCP_SYN_SENT &&
601 inet_test_bit(DEFER_CONNECT, sk)) {
602 /* Active TCP fastopen socket with defer_connect
603 * Return EPOLLOUT so application can call write()
604 * in order for kernel to generate SYN+data
605 */
606 mask |= EPOLLOUT | EPOLLWRNORM;
607 }
608 /* This barrier is coupled with smp_wmb() in tcp_done_with_error() */
609 smp_rmb();
610 if (READ_ONCE(sk->sk_err) ||
611 !skb_queue_empty_lockless(&sk->sk_error_queue))
612 mask |= EPOLLERR;
613
614 return mask;
615}
616EXPORT_SYMBOL(tcp_poll);
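
/* Userspace sketch (illustrative only, not kernel code): the mask computed
 * above is what a poll()/epoll caller ultimately observes.  A reader that
 * wants to distinguish "peer sent FIN" from "data available" typically
 * registers for EPOLLRDHUP in addition to EPOLLIN:
 *
 *	struct epoll_event ev = {
 *		.events  = EPOLLIN | EPOLLRDHUP,
 *		.data.fd = sockfd,
 *	};
 *	epoll_ctl(epfd, EPOLL_CTL_ADD, sockfd, &ev);
 *
 * EPOLLERR and EPOLLHUP are reported even when not requested, matching the
 * "unmaskable" behaviour discussed in the comment above.
 */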
617
618int tcp_ioctl(struct sock *sk, int cmd, int *karg)
619{
620 struct tcp_sock *tp = tcp_sk(sk);
621 int answ;
622 bool slow;
623
624 switch (cmd) {
625 case SIOCINQ:
626 if (sk->sk_state == TCP_LISTEN)
627 return -EINVAL;
628
629 slow = lock_sock_fast(sk);
630 answ = tcp_inq(sk);
631 unlock_sock_fast(sk, slow);
632 break;
633 case SIOCATMARK:
634 answ = READ_ONCE(tp->urg_data) &&
635 READ_ONCE(tp->urg_seq) == READ_ONCE(tp->copied_seq);
636 break;
637 case SIOCOUTQ:
638 if (sk->sk_state == TCP_LISTEN)
639 return -EINVAL;
640
641 if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV))
642 answ = 0;
643 else
644 answ = READ_ONCE(tp->write_seq) - tp->snd_una;
645 break;
646 case SIOCOUTQNSD:
647 if (sk->sk_state == TCP_LISTEN)
648 return -EINVAL;
649
650 if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV))
651 answ = 0;
652 else
653 answ = READ_ONCE(tp->write_seq) -
654 READ_ONCE(tp->snd_nxt);
655 break;
656 default:
657 return -ENOIOCTLCMD;
658 }
659
660 *karg = answ;
661 return 0;
662}
663EXPORT_SYMBOL(tcp_ioctl);
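
/* Userspace sketch (illustrative only): the ioctls handled above are the
 * classic queue-size queries, e.g.:
 *
 *	int unread, unacked;
 *	ioctl(sockfd, SIOCINQ, &unread);	// bytes queued but not yet read
 *	ioctl(sockfd, SIOCOUTQ, &unacked);	// write_seq - snd_una
 *
 * SIOCINQ is the same value FIONREAD reports on TCP sockets, and
 * SIOCOUTQNSD returns only the bytes not yet sent (write_seq - snd_nxt).
 */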
664
665void tcp_mark_push(struct tcp_sock *tp, struct sk_buff *skb)
666{
667 TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_PSH;
668 tp->pushed_seq = tp->write_seq;
669}
670
671static inline bool forced_push(const struct tcp_sock *tp)
672{
673 return after(tp->write_seq, tp->pushed_seq + (tp->max_window >> 1));
674}
675
676void tcp_skb_entail(struct sock *sk, struct sk_buff *skb)
677{
678 struct tcp_sock *tp = tcp_sk(sk);
679 struct tcp_skb_cb *tcb = TCP_SKB_CB(skb);
680
681 tcb->seq = tcb->end_seq = tp->write_seq;
682 tcb->tcp_flags = TCPHDR_ACK;
683 __skb_header_release(skb);
684 tcp_add_write_queue_tail(sk, skb);
685 sk_wmem_queued_add(sk, skb->truesize);
686 sk_mem_charge(sk, skb->truesize);
687 if (tp->nonagle & TCP_NAGLE_PUSH)
688 tp->nonagle &= ~TCP_NAGLE_PUSH;
689
690 tcp_slow_start_after_idle_check(sk);
691}
692
693static inline void tcp_mark_urg(struct tcp_sock *tp, int flags)
694{
695 if (flags & MSG_OOB)
696 tp->snd_up = tp->write_seq;
697}
698
699/* If a not-yet-filled skb is pushed, do not send it if
700 * we have data packets in Qdisc or NIC queues:
701 * because TX completion will happen shortly, this gives a chance
702 * to coalesce future sendmsg() payload into this skb, without
703 * the need for a timer, and with no latency trade-off.
704 * As packets carrying data payload have a bigger truesize
705 * than pure ACK (dataless) packets, the last checks prevent
706 * autocorking if we only have an ACK in the Qdisc/NIC queues,
707 * or if TX completion was delayed after we processed the ACK packet.
708 */
709static bool tcp_should_autocork(struct sock *sk, struct sk_buff *skb,
710 int size_goal)
711{
712 return skb->len < size_goal &&
713 READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_autocorking) &&
714 !tcp_rtx_queue_empty(sk) &&
715 refcount_read(&sk->sk_wmem_alloc) > skb->truesize &&
716 tcp_skb_can_collapse_to(skb);
717}
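
/* Autocorking can be disabled system-wide through the
 * net.ipv4.tcp_autocorking sysctl read above, e.g.
 * "sysctl -w net.ipv4.tcp_autocorking=0".  Applications wanting per-write
 * control usually rely on TCP_CORK / TCP_NODELAY instead.
 */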
718
719void tcp_push(struct sock *sk, int flags, int mss_now,
720 int nonagle, int size_goal)
721{
722 struct tcp_sock *tp = tcp_sk(sk);
723 struct sk_buff *skb;
724
725 skb = tcp_write_queue_tail(sk);
726 if (!skb)
727 return;
728 if (!(flags & MSG_MORE) || forced_push(tp))
729 tcp_mark_push(tp, skb);
730
731 tcp_mark_urg(tp, flags);
732
733 if (tcp_should_autocork(sk, skb, size_goal)) {
734
735 /* avoid atomic op if TSQ_THROTTLED bit is already set */
736 if (!test_bit(TSQ_THROTTLED, &sk->sk_tsq_flags)) {
737 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPAUTOCORKING);
738 set_bit(TSQ_THROTTLED, &sk->sk_tsq_flags);
739 smp_mb__after_atomic();
740 }
741 /* It is possible TX completion already happened
742 * before we set TSQ_THROTTLED.
743 */
744 if (refcount_read(&sk->sk_wmem_alloc) > skb->truesize)
745 return;
746 }
747
748 if (flags & MSG_MORE)
749 nonagle = TCP_NAGLE_CORK;
750
751 __tcp_push_pending_frames(sk, mss_now, nonagle);
752}
753
754static int tcp_splice_data_recv(read_descriptor_t *rd_desc, struct sk_buff *skb,
755 unsigned int offset, size_t len)
756{
757 struct tcp_splice_state *tss = rd_desc->arg.data;
758 int ret;
759
760 ret = skb_splice_bits(skb, skb->sk, offset, tss->pipe,
761 min(rd_desc->count, len), tss->flags);
762 if (ret > 0)
763 rd_desc->count -= ret;
764 return ret;
765}
766
767static int __tcp_splice_read(struct sock *sk, struct tcp_splice_state *tss)
768{
769 /* Store TCP splice context information in read_descriptor_t. */
770 read_descriptor_t rd_desc = {
771 .arg.data = tss,
772 .count = tss->len,
773 };
774
775 return tcp_read_sock(sk, &rd_desc, tcp_splice_data_recv);
776}
777
778/**
779 * tcp_splice_read - splice data from TCP socket to a pipe
780 * @sock: socket to splice from
781 * @ppos: position (not valid)
782 * @pipe: pipe to splice to
783 * @len: number of bytes to splice
784 * @flags: splice modifier flags
785 *
786 * Description:
787 * Will read pages from given socket and fill them into a pipe.
788 *
789 **/
790ssize_t tcp_splice_read(struct socket *sock, loff_t *ppos,
791 struct pipe_inode_info *pipe, size_t len,
792 unsigned int flags)
793{
794 struct sock *sk = sock->sk;
795 struct tcp_splice_state tss = {
796 .pipe = pipe,
797 .len = len,
798 .flags = flags,
799 };
800 long timeo;
801 ssize_t spliced;
802 int ret;
803
804 sock_rps_record_flow(sk);
805 /*
806 * We can't seek on a socket input
807 */
808 if (unlikely(*ppos))
809 return -ESPIPE;
810
811 ret = spliced = 0;
812
813 lock_sock(sk);
814
815 timeo = sock_rcvtimeo(sk, sock->file->f_flags & O_NONBLOCK);
816 while (tss.len) {
817 ret = __tcp_splice_read(sk, &tss);
818 if (ret < 0)
819 break;
820 else if (!ret) {
821 if (spliced)
822 break;
823 if (sock_flag(sk, SOCK_DONE))
824 break;
825 if (sk->sk_err) {
826 ret = sock_error(sk);
827 break;
828 }
829 if (sk->sk_shutdown & RCV_SHUTDOWN)
830 break;
831 if (sk->sk_state == TCP_CLOSE) {
832 /*
833 * This occurs when the user tries to read
834 * from a never-connected socket.
835 */
836 ret = -ENOTCONN;
837 break;
838 }
839 if (!timeo) {
840 ret = -EAGAIN;
841 break;
842 }
843 /* If __tcp_splice_read() got nothing while we have
844 * an skb in the receive queue, we do not want to loop.
845 * This might happen with URG data.
846 */
847 if (!skb_queue_empty(&sk->sk_receive_queue))
848 break;
849 ret = sk_wait_data(sk, &timeo, NULL);
850 if (ret < 0)
851 break;
852 if (signal_pending(current)) {
853 ret = sock_intr_errno(timeo);
854 break;
855 }
856 continue;
857 }
858 tss.len -= ret;
859 spliced += ret;
860
861 if (!tss.len || !timeo)
862 break;
863 release_sock(sk);
864 lock_sock(sk);
865
866 if (sk->sk_err || sk->sk_state == TCP_CLOSE ||
867 (sk->sk_shutdown & RCV_SHUTDOWN) ||
868 signal_pending(current))
869 break;
870 }
871
872 release_sock(sk);
873
874 if (spliced)
875 return spliced;
876
877 return ret;
878}
879EXPORT_SYMBOL(tcp_splice_read);
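
/* Userspace sketch (illustrative only): tcp_splice_read() backs splice(2)
 * when the source fd is a TCP socket.  A zero-copy relay from a socket into
 * a pipe might look like:
 *
 *	int pipefd[2];
 *	pipe(pipefd);
 *	ssize_t n = splice(sockfd, NULL, pipefd[1], NULL, 65536,
 *			   SPLICE_F_MOVE | SPLICE_F_MORE);
 *
 * The pipe contents can then be spliced onward (to a file or another
 * socket) without the payload ever passing through user memory.
 */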
880
881struct sk_buff *tcp_stream_alloc_skb(struct sock *sk, gfp_t gfp,
882 bool force_schedule)
883{
884 struct sk_buff *skb;
885
886 skb = alloc_skb_fclone(MAX_TCP_HEADER, gfp);
887 if (likely(skb)) {
888 bool mem_scheduled;
889
890 skb->truesize = SKB_TRUESIZE(skb_end_offset(skb));
891 if (force_schedule) {
892 mem_scheduled = true;
893 sk_forced_mem_schedule(sk, skb->truesize);
894 } else {
895 mem_scheduled = sk_wmem_schedule(sk, skb->truesize);
896 }
897 if (likely(mem_scheduled)) {
898 skb_reserve(skb, MAX_TCP_HEADER);
899 skb->ip_summed = CHECKSUM_PARTIAL;
900 INIT_LIST_HEAD(&skb->tcp_tsorted_anchor);
901 return skb;
902 }
903 __kfree_skb(skb);
904 } else {
905 sk->sk_prot->enter_memory_pressure(sk);
906 sk_stream_moderate_sndbuf(sk);
907 }
908 return NULL;
909}
910
911static unsigned int tcp_xmit_size_goal(struct sock *sk, u32 mss_now,
912 int large_allowed)
913{
914 struct tcp_sock *tp = tcp_sk(sk);
915 u32 new_size_goal, size_goal;
916
917 if (!large_allowed)
918 return mss_now;
919
920 /* Note : tcp_tso_autosize() will eventually split this later */
921 new_size_goal = tcp_bound_to_half_wnd(tp, sk->sk_gso_max_size);
922
923 /* We try hard to avoid divides here */
924 size_goal = tp->gso_segs * mss_now;
925 if (unlikely(new_size_goal < size_goal ||
926 new_size_goal >= size_goal + mss_now)) {
927 tp->gso_segs = min_t(u16, new_size_goal / mss_now,
928 sk->sk_gso_max_segs);
929 size_goal = tp->gso_segs * mss_now;
930 }
931
932 return max(size_goal, mss_now);
933}
934
935int tcp_send_mss(struct sock *sk, int *size_goal, int flags)
936{
937 int mss_now;
938
939 mss_now = tcp_current_mss(sk);
940 *size_goal = tcp_xmit_size_goal(sk, mss_now, !(flags & MSG_OOB));
941
942 return mss_now;
943}
944
945/* In some cases, sendmsg() could have added an skb to the write queue,
946 * but failed to add any payload to it. We need to remove it to consume less
947 * memory, but more importantly to be able to generate EPOLLOUT for
948 * edge-triggered epoll() users. Another reason is that tcp_write_xmit() does
949 * not like finding an empty skb in the write queue.
950 */
951void tcp_remove_empty_skb(struct sock *sk)
952{
953 struct sk_buff *skb = tcp_write_queue_tail(sk);
954
955 if (skb && TCP_SKB_CB(skb)->seq == TCP_SKB_CB(skb)->end_seq) {
956 tcp_unlink_write_queue(skb, sk);
957 if (tcp_write_queue_empty(sk))
958 tcp_chrono_stop(sk, TCP_CHRONO_BUSY);
959 tcp_wmem_free_skb(sk, skb);
960 }
961}
962
963/* The skb is changing from pure zerocopy to mixed; charge the zerocopy part. */
964static int tcp_downgrade_zcopy_pure(struct sock *sk, struct sk_buff *skb)
965{
966 if (unlikely(skb_zcopy_pure(skb))) {
967 u32 extra = skb->truesize -
968 SKB_TRUESIZE(skb_end_offset(skb));
969
970 if (!sk_wmem_schedule(sk, extra))
971 return -ENOMEM;
972
973 sk_mem_charge(sk, extra);
974 skb_shinfo(skb)->flags &= ~SKBFL_PURE_ZEROCOPY;
975 }
976 return 0;
977}
978
980int tcp_wmem_schedule(struct sock *sk, int copy)
981{
982 int left;
983
984 if (likely(sk_wmem_schedule(sk, copy)))
985 return copy;
986
987 /* We could be in trouble if we have nothing queued.
988 * Use whatever is left in sk->sk_forward_alloc and tcp_wmem[0]
989 * to guarantee some progress.
990 */
991 left = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_wmem[0]) - sk->sk_wmem_queued;
992 if (left > 0)
993 sk_forced_mem_schedule(sk, min(left, copy));
994 return min(copy, sk->sk_forward_alloc);
995}
996
997void tcp_free_fastopen_req(struct tcp_sock *tp)
998{
999 if (tp->fastopen_req) {
1000 kfree(tp->fastopen_req);
1001 tp->fastopen_req = NULL;
1002 }
1003}
1004
1005int tcp_sendmsg_fastopen(struct sock *sk, struct msghdr *msg, int *copied,
1006 size_t size, struct ubuf_info *uarg)
1007{
1008 struct tcp_sock *tp = tcp_sk(sk);
1009 struct inet_sock *inet = inet_sk(sk);
1010 struct sockaddr *uaddr = msg->msg_name;
1011 int err, flags;
1012
1013 if (!(READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_fastopen) &
1014 TFO_CLIENT_ENABLE) ||
1015 (uaddr && msg->msg_namelen >= sizeof(uaddr->sa_family) &&
1016 uaddr->sa_family == AF_UNSPEC))
1017 return -EOPNOTSUPP;
1018 if (tp->fastopen_req)
1019 return -EALREADY; /* Another Fast Open is in progress */
1020
1021 tp->fastopen_req = kzalloc(sizeof(struct tcp_fastopen_request),
1022 sk->sk_allocation);
1023 if (unlikely(!tp->fastopen_req))
1024 return -ENOBUFS;
1025 tp->fastopen_req->data = msg;
1026 tp->fastopen_req->size = size;
1027 tp->fastopen_req->uarg = uarg;
1028
1029 if (inet_test_bit(DEFER_CONNECT, sk)) {
1030 err = tcp_connect(sk);
1031 /* Same failure procedure as in tcp_v4/6_connect */
1032 if (err) {
1033 tcp_set_state(sk, TCP_CLOSE);
1034 inet->inet_dport = 0;
1035 sk->sk_route_caps = 0;
1036 }
1037 }
1038 flags = (msg->msg_flags & MSG_DONTWAIT) ? O_NONBLOCK : 0;
1039 err = __inet_stream_connect(sk->sk_socket, uaddr,
1040 msg->msg_namelen, flags, 1);
1041 /* fastopen_req could already have been freed in __inet_stream_connect
1042 * if the connection times out or gets reset
1043 */
1044 if (tp->fastopen_req) {
1045 *copied = tp->fastopen_req->copied;
1046 tcp_free_fastopen_req(tp);
1047 inet_clear_bit(DEFER_CONNECT, sk);
1048 }
1049 return err;
1050}
1051
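/* Userspace sketch (illustrative only): the client-side Fast Open path
 * above is reached either via sendmsg()/sendto() with MSG_FASTOPEN on an
 * unconnected socket, or via the TCP_FASTOPEN_CONNECT socket option
 * followed by connect() and a first write (the "defer connect" case):
 *
 *	// variant 1: carry data on the SYN directly
 *	sendto(fd, buf, len, MSG_FASTOPEN, (struct sockaddr *)&addr, alen);
 *
 *	// variant 2: defer the SYN until the first write
 *	int one = 1;
 *	setsockopt(fd, IPPROTO_TCP, TCP_FASTOPEN_CONNECT, &one, sizeof(one));
 *	connect(fd, (struct sockaddr *)&addr, alen);	// returns immediately
 *	write(fd, buf, len);				// emits SYN + data
 */
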
1052int tcp_sendmsg_locked(struct sock *sk, struct msghdr *msg, size_t size)
1053{
1054 struct tcp_sock *tp = tcp_sk(sk);
1055 struct ubuf_info *uarg = NULL;
1056 struct sk_buff *skb;
1057 struct sockcm_cookie sockc;
1058 int flags, err, copied = 0;
1059 int mss_now = 0, size_goal, copied_syn = 0;
1060 int process_backlog = 0;
1061 int zc = 0;
1062 long timeo;
1063
1064 flags = msg->msg_flags;
1065
1066 if ((flags & MSG_ZEROCOPY) && size) {
1067 if (msg->msg_ubuf) {
1068 uarg = msg->msg_ubuf;
1069 if (sk->sk_route_caps & NETIF_F_SG)
1070 zc = MSG_ZEROCOPY;
1071 } else if (sock_flag(sk, SOCK_ZEROCOPY)) {
1072 skb = tcp_write_queue_tail(sk);
1073 uarg = msg_zerocopy_realloc(sk, size, skb_zcopy(skb));
1074 if (!uarg) {
1075 err = -ENOBUFS;
1076 goto out_err;
1077 }
1078 if (sk->sk_route_caps & NETIF_F_SG)
1079 zc = MSG_ZEROCOPY;
1080 else
1081 uarg_to_msgzc(uarg)->zerocopy = 0;
1082 }
1083 } else if (unlikely(msg->msg_flags & MSG_SPLICE_PAGES) && size) {
1084 if (sk->sk_route_caps & NETIF_F_SG)
1085 zc = MSG_SPLICE_PAGES;
1086 }
1087
1088 if (unlikely(flags & MSG_FASTOPEN ||
1089 inet_test_bit(DEFER_CONNECT, sk)) &&
1090 !tp->repair) {
1091 err = tcp_sendmsg_fastopen(sk, msg, &copied_syn, size, uarg);
1092 if (err == -EINPROGRESS && copied_syn > 0)
1093 goto out;
1094 else if (err)
1095 goto out_err;
1096 }
1097
1098 timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
1099
1100 tcp_rate_check_app_limited(sk); /* is sending application-limited? */
1101
1102 /* Wait for a connection to finish. One exception is TCP Fast Open
1103 * (passive side) where data is allowed to be sent before a connection
1104 * is fully established.
1105 */
1106 if (((1 << sk->sk_state) & ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT)) &&
1107 !tcp_passive_fastopen(sk)) {
1108 err = sk_stream_wait_connect(sk, &timeo);
1109 if (err != 0)
1110 goto do_error;
1111 }
1112
1113 if (unlikely(tp->repair)) {
1114 if (tp->repair_queue == TCP_RECV_QUEUE) {
1115 copied = tcp_send_rcvq(sk, msg, size);
1116 goto out_nopush;
1117 }
1118
1119 err = -EINVAL;
1120 if (tp->repair_queue == TCP_NO_QUEUE)
1121 goto out_err;
1122
1123 /* 'common' sending to sendq */
1124 }
1125
1126 sockcm_init(&sockc, sk);
1127 if (msg->msg_controllen) {
1128 err = sock_cmsg_send(sk, msg, &sockc);
1129 if (unlikely(err)) {
1130 err = -EINVAL;
1131 goto out_err;
1132 }
1133 }
1134
1135 /* This should be in poll */
1136 sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk);
1137
1138 /* Ok commence sending. */
1139 copied = 0;
1140
1141restart:
1142 mss_now = tcp_send_mss(sk, &size_goal, flags);
1143
1144 err = -EPIPE;
1145 if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN))
1146 goto do_error;
1147
1148 while (msg_data_left(msg)) {
1149 ssize_t copy = 0;
1150
1151 skb = tcp_write_queue_tail(sk);
1152 if (skb)
1153 copy = size_goal - skb->len;
1154
1155 if (copy <= 0 || !tcp_skb_can_collapse_to(skb)) {
1156 bool first_skb;
1157
1158new_segment:
1159 if (!sk_stream_memory_free(sk))
1160 goto wait_for_space;
1161
1162 if (unlikely(process_backlog >= 16)) {
1163 process_backlog = 0;
1164 if (sk_flush_backlog(sk))
1165 goto restart;
1166 }
1167 first_skb = tcp_rtx_and_write_queues_empty(sk);
1168 skb = tcp_stream_alloc_skb(sk, sk->sk_allocation,
1169 first_skb);
1170 if (!skb)
1171 goto wait_for_space;
1172
1173 process_backlog++;
1174
1175#ifdef CONFIG_SKB_DECRYPTED
1176 skb->decrypted = !!(flags & MSG_SENDPAGE_DECRYPTED);
1177#endif
1178 tcp_skb_entail(sk, skb);
1179 copy = size_goal;
1180
1181 /* All packets are restored as if they have
1182 * already been sent. skb_mstamp_ns isn't set to
1183 * avoid wrong rtt estimation.
1184 */
1185 if (tp->repair)
1186 TCP_SKB_CB(skb)->sacked |= TCPCB_REPAIRED;
1187 }
1188
1189 /* Try to append data to the end of skb. */
1190 if (copy > msg_data_left(msg))
1191 copy = msg_data_left(msg);
1192
1193 if (zc == 0) {
1194 bool merge = true;
1195 int i = skb_shinfo(skb)->nr_frags;
1196 struct page_frag *pfrag = sk_page_frag(sk);
1197
1198 if (!sk_page_frag_refill(sk, pfrag))
1199 goto wait_for_space;
1200
1201 if (!skb_can_coalesce(skb, i, pfrag->page,
1202 pfrag->offset)) {
1203 if (i >= READ_ONCE(net_hotdata.sysctl_max_skb_frags)) {
1204 tcp_mark_push(tp, skb);
1205 goto new_segment;
1206 }
1207 merge = false;
1208 }
1209
1210 copy = min_t(int, copy, pfrag->size - pfrag->offset);
1211
1212 if (unlikely(skb_zcopy_pure(skb) || skb_zcopy_managed(skb))) {
1213 if (tcp_downgrade_zcopy_pure(sk, skb))
1214 goto wait_for_space;
1215 skb_zcopy_downgrade_managed(skb);
1216 }
1217
1218 copy = tcp_wmem_schedule(sk, copy);
1219 if (!copy)
1220 goto wait_for_space;
1221
1222 err = skb_copy_to_page_nocache(sk, &msg->msg_iter, skb,
1223 pfrag->page,
1224 pfrag->offset,
1225 copy);
1226 if (err)
1227 goto do_error;
1228
1229 /* Update the skb. */
1230 if (merge) {
1231 skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], copy);
1232 } else {
1233 skb_fill_page_desc(skb, i, pfrag->page,
1234 pfrag->offset, copy);
1235 page_ref_inc(pfrag->page);
1236 }
1237 pfrag->offset += copy;
1238 } else if (zc == MSG_ZEROCOPY) {
1239 /* The first append to a frag-less skb builds the initial
1240 * pure zerocopy skb
1241 */
1242 if (!skb->len)
1243 skb_shinfo(skb)->flags |= SKBFL_PURE_ZEROCOPY;
1244
1245 if (!skb_zcopy_pure(skb)) {
1246 copy = tcp_wmem_schedule(sk, copy);
1247 if (!copy)
1248 goto wait_for_space;
1249 }
1250
1251 err = skb_zerocopy_iter_stream(sk, skb, msg, copy, uarg);
1252 if (err == -EMSGSIZE || err == -EEXIST) {
1253 tcp_mark_push(tp, skb);
1254 goto new_segment;
1255 }
1256 if (err < 0)
1257 goto do_error;
1258 copy = err;
1259 } else if (zc == MSG_SPLICE_PAGES) {
1260 /* Splice in data if we can; copy if we can't. */
1261 if (tcp_downgrade_zcopy_pure(sk, skb))
1262 goto wait_for_space;
1263 copy = tcp_wmem_schedule(sk, copy);
1264 if (!copy)
1265 goto wait_for_space;
1266
1267 err = skb_splice_from_iter(skb, &msg->msg_iter, copy,
1268 sk->sk_allocation);
1269 if (err < 0) {
1270 if (err == -EMSGSIZE) {
1271 tcp_mark_push(tp, skb);
1272 goto new_segment;
1273 }
1274 goto do_error;
1275 }
1276 copy = err;
1277
1278 if (!(flags & MSG_NO_SHARED_FRAGS))
1279 skb_shinfo(skb)->flags |= SKBFL_SHARED_FRAG;
1280
1281 sk_wmem_queued_add(sk, copy);
1282 sk_mem_charge(sk, copy);
1283 }
1284
1285 if (!copied)
1286 TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_PSH;
1287
1288 WRITE_ONCE(tp->write_seq, tp->write_seq + copy);
1289 TCP_SKB_CB(skb)->end_seq += copy;
1290 tcp_skb_pcount_set(skb, 0);
1291
1292 copied += copy;
1293 if (!msg_data_left(msg)) {
1294 if (unlikely(flags & MSG_EOR))
1295 TCP_SKB_CB(skb)->eor = 1;
1296 goto out;
1297 }
1298
1299 if (skb->len < size_goal || (flags & MSG_OOB) || unlikely(tp->repair))
1300 continue;
1301
1302 if (forced_push(tp)) {
1303 tcp_mark_push(tp, skb);
1304 __tcp_push_pending_frames(sk, mss_now, TCP_NAGLE_PUSH);
1305 } else if (skb == tcp_send_head(sk))
1306 tcp_push_one(sk, mss_now);
1307 continue;
1308
1309wait_for_space:
1310 set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
1311 tcp_remove_empty_skb(sk);
1312 if (copied)
1313 tcp_push(sk, flags & ~MSG_MORE, mss_now,
1314 TCP_NAGLE_PUSH, size_goal);
1315
1316 err = sk_stream_wait_memory(sk, &timeo);
1317 if (err != 0)
1318 goto do_error;
1319
1320 mss_now = tcp_send_mss(sk, &size_goal, flags);
1321 }
1322
1323out:
1324 if (copied) {
1325 tcp_tx_timestamp(sk, &sockc);
1326 tcp_push(sk, flags, mss_now, tp->nonagle, size_goal);
1327 }
1328out_nopush:
1329 /* msg->msg_ubuf is pinned by the caller so we don't take extra refs */
1330 if (uarg && !msg->msg_ubuf)
1331 net_zcopy_put(uarg);
1332 return copied + copied_syn;
1333
1334do_error:
1335 tcp_remove_empty_skb(sk);
1336
1337 if (copied + copied_syn)
1338 goto out;
1339out_err:
1340 /* msg->msg_ubuf is pinned by the caller so we don't take extra refs */
1341 if (uarg && !msg->msg_ubuf)
1342 net_zcopy_put_abort(uarg, true);
1343 err = sk_stream_error(sk, flags, err);
1344 /* make sure we wake any epoll edge trigger waiter */
1345 if (unlikely(tcp_rtx_and_write_queues_empty(sk) && err == -EAGAIN)) {
1346 sk->sk_write_space(sk);
1347 tcp_chrono_stop(sk, TCP_CHRONO_SNDBUF_LIMITED);
1348 }
1349 return err;
1350}
1351EXPORT_SYMBOL_GPL(tcp_sendmsg_locked);
1352
1353int tcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
1354{
1355 int ret;
1356
1357 lock_sock(sk);
1358 ret = tcp_sendmsg_locked(sk, msg, size);
1359 release_sock(sk);
1360
1361 return ret;
1362}
1363EXPORT_SYMBOL(tcp_sendmsg);
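
/* Userspace sketch (illustrative only): the MSG_ZEROCOPY path handled by
 * tcp_sendmsg_locked() requires opting in with SO_ZEROCOPY and harvesting
 * completions from the socket error queue:
 *
 *	int one = 1;
 *	setsockopt(fd, SOL_SOCKET, SO_ZEROCOPY, &one, sizeof(one));
 *	send(fd, buf, len, MSG_ZEROCOPY);
 *	// later: recvmsg(fd, &msg, MSG_ERRQUEUE) delivers a
 *	// SO_EE_ORIGIN_ZEROCOPY extended error covering the completed range.
 *
 * The buffer must not be reused until the completion arrives, otherwise
 * the NIC may transmit modified data.
 */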
1364
1365void tcp_splice_eof(struct socket *sock)
1366{
1367 struct sock *sk = sock->sk;
1368 struct tcp_sock *tp = tcp_sk(sk);
1369 int mss_now, size_goal;
1370
1371 if (!tcp_write_queue_tail(sk))
1372 return;
1373
1374 lock_sock(sk);
1375 mss_now = tcp_send_mss(sk, &size_goal, 0);
1376 tcp_push(sk, 0, mss_now, tp->nonagle, size_goal);
1377 release_sock(sk);
1378}
1379EXPORT_SYMBOL_GPL(tcp_splice_eof);
1380
1381/*
1382 * Handle reading urgent data. BSD has very simple semantics for
1383 * this, no blocking and very strange errors 8)
1384 */
1385
1386static int tcp_recv_urg(struct sock *sk, struct msghdr *msg, int len, int flags)
1387{
1388 struct tcp_sock *tp = tcp_sk(sk);
1389
1390 /* No URG data to read. */
1391 if (sock_flag(sk, SOCK_URGINLINE) || !tp->urg_data ||
1392 tp->urg_data == TCP_URG_READ)
1393 return -EINVAL; /* Yes this is right ! */
1394
1395 if (sk->sk_state == TCP_CLOSE && !sock_flag(sk, SOCK_DONE))
1396 return -ENOTCONN;
1397
1398 if (tp->urg_data & TCP_URG_VALID) {
1399 int err = 0;
1400 char c = tp->urg_data;
1401
1402 if (!(flags & MSG_PEEK))
1403 WRITE_ONCE(tp->urg_data, TCP_URG_READ);
1404
1405 /* Read urgent data. */
1406 msg->msg_flags |= MSG_OOB;
1407
1408 if (len > 0) {
1409 if (!(flags & MSG_TRUNC))
1410 err = memcpy_to_msg(msg, &c, 1);
1411 len = 1;
1412 } else
1413 msg->msg_flags |= MSG_TRUNC;
1414
1415 return err ? -EFAULT : len;
1416 }
1417
1418 if (sk->sk_state == TCP_CLOSE || (sk->sk_shutdown & RCV_SHUTDOWN))
1419 return 0;
1420
1421 /* Fixed the recv(..., MSG_OOB) behaviour. BSD docs and
1422 * the available implementations agree in this case:
1423 * this call should never block, independent of the
1424 * blocking state of the socket.
1425 * Mike <pall@rz.uni-karlsruhe.de>
1426 */
1427 return -EAGAIN;
1428}
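
/* Userspace sketch (illustrative only): urgent data is normally consumed
 * out of band, one byte at a time, with SIOCATMARK telling the reader when
 * the normal stream has caught up to the urgent mark:
 *
 *	char oob;
 *	int at_mark;
 *	recv(fd, &oob, 1, MSG_OOB);		// fetch the urgent byte
 *	ioctl(fd, SIOCATMARK, &at_mark);	// 1 when the next read is at the mark
 *
 * With SO_OOBINLINE set, the urgent byte is delivered in the normal data
 * stream instead and MSG_OOB reads fail with EINVAL, as enforced above.
 */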
1429
1430static int tcp_peek_sndq(struct sock *sk, struct msghdr *msg, int len)
1431{
1432 struct sk_buff *skb;
1433 int copied = 0, err = 0;
1434
1435 skb_rbtree_walk(skb, &sk->tcp_rtx_queue) {
1436 err = skb_copy_datagram_msg(skb, 0, msg, skb->len);
1437 if (err)
1438 return err;
1439 copied += skb->len;
1440 }
1441
1442 skb_queue_walk(&sk->sk_write_queue, skb) {
1443 err = skb_copy_datagram_msg(skb, 0, msg, skb->len);
1444 if (err)
1445 break;
1446
1447 copied += skb->len;
1448 }
1449
1450 return err ?: copied;
1451}
1452
1453/* Clean up the receive buffer for full frames taken by the user,
1454 * then send an ACK if necessary. COPIED is the number of bytes
1455 * tcp_recvmsg() has given to the user so far; it speeds up the
1456 * calculation of whether or not we must ACK for the sake of
1457 * a window update.
1458 */
1459void __tcp_cleanup_rbuf(struct sock *sk, int copied)
1460{
1461 struct tcp_sock *tp = tcp_sk(sk);
1462 bool time_to_ack = false;
1463
1464 if (inet_csk_ack_scheduled(sk)) {
1465 const struct inet_connection_sock *icsk = inet_csk(sk);
1466
1467 if (/* Once-per-two-segments ACK was not sent by tcp_input.c */
1468 tp->rcv_nxt - tp->rcv_wup > icsk->icsk_ack.rcv_mss ||
1469 /*
1470 * If this read emptied the read buffer, we send an ACK when the
1471 * connection is not bidirectional, the user has drained the
1472 * receive buffer, and there was a small segment
1473 * in the queue.
1474 */
1475 (copied > 0 &&
1476 ((icsk->icsk_ack.pending & ICSK_ACK_PUSHED2) ||
1477 ((icsk->icsk_ack.pending & ICSK_ACK_PUSHED) &&
1478 !inet_csk_in_pingpong_mode(sk))) &&
1479 !atomic_read(&sk->sk_rmem_alloc)))
1480 time_to_ack = true;
1481 }
1482
1483 /* We send an ACK if we can now advertise a non-zero window
1484 * which has been raised "significantly".
1485 *
1486 * Even if window raised up to infinity, do not send window open ACK
1487 * in states, where we will not receive more. It is useless.
1488 */
1489 if (copied > 0 && !time_to_ack && !(sk->sk_shutdown & RCV_SHUTDOWN)) {
1490 __u32 rcv_window_now = tcp_receive_window(tp);
1491
1492 /* Optimize, __tcp_select_window() is not cheap. */
1493 if (2*rcv_window_now <= tp->window_clamp) {
1494 __u32 new_window = __tcp_select_window(sk);
1495
1496 /* Send an ACK now if this read freed lots of space
1497 * in our buffer. new_window is the new window; we can
1498 * advertise it now if it is not smaller than the current one.
1499 * "Lots" means "at least twice" here.
1500 */
1501 if (new_window && new_window >= 2 * rcv_window_now)
1502 time_to_ack = true;
1503 }
1504 }
1505 if (time_to_ack)
1506 tcp_send_ack(sk);
1507}
1508
1509void tcp_cleanup_rbuf(struct sock *sk, int copied)
1510{
1511 struct sk_buff *skb = skb_peek(&sk->sk_receive_queue);
1512 struct tcp_sock *tp = tcp_sk(sk);
1513
1514 WARN(skb && !before(tp->copied_seq, TCP_SKB_CB(skb)->end_seq),
1515 "cleanup rbuf bug: copied %X seq %X rcvnxt %X\n",
1516 tp->copied_seq, TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt);
1517 __tcp_cleanup_rbuf(sk, copied);
1518}
1519
1520static void tcp_eat_recv_skb(struct sock *sk, struct sk_buff *skb)
1521{
1522 __skb_unlink(skb, &sk->sk_receive_queue);
1523 if (likely(skb->destructor == sock_rfree)) {
1524 sock_rfree(skb);
1525 skb->destructor = NULL;
1526 skb->sk = NULL;
1527 return skb_attempt_defer_free(skb);
1528 }
1529 __kfree_skb(skb);
1530}
1531
1532struct sk_buff *tcp_recv_skb(struct sock *sk, u32 seq, u32 *off)
1533{
1534 struct sk_buff *skb;
1535 u32 offset;
1536
1537 while ((skb = skb_peek(&sk->sk_receive_queue)) != NULL) {
1538 offset = seq - TCP_SKB_CB(skb)->seq;
1539 if (unlikely(TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN)) {
1540 pr_err_once("%s: found a SYN, please report !\n", __func__);
1541 offset--;
1542 }
1543 if (offset < skb->len || (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN)) {
1544 *off = offset;
1545 return skb;
1546 }
1547 /* This looks weird, but it can happen if TCP collapsing
1548 * split a fat GRO packet while we released the socket lock
1549 * in skb_splice_bits().
1550 */
1551 tcp_eat_recv_skb(sk, skb);
1552 }
1553 return NULL;
1554}
1555EXPORT_SYMBOL(tcp_recv_skb);
1556
1557/*
1558 * This routine provides an alternative to tcp_recvmsg() for routines
1559 * that would like to handle copying from skbuffs directly in 'sendfile'
1560 * fashion.
1561 * Note:
1562 * - It is assumed that the socket was locked by the caller.
1563 * - The routine does not block.
1564 * - At present, there is no support for reading OOB data
1565 * or for 'peeking' the socket using this routine
1566 * (although both would be easy to implement).
1567 */
1568int tcp_read_sock(struct sock *sk, read_descriptor_t *desc,
1569 sk_read_actor_t recv_actor)
1570{
1571 struct sk_buff *skb;
1572 struct tcp_sock *tp = tcp_sk(sk);
1573 u32 seq = tp->copied_seq;
1574 u32 offset;
1575 int copied = 0;
1576
1577 if (sk->sk_state == TCP_LISTEN)
1578 return -ENOTCONN;
1579 while ((skb = tcp_recv_skb(sk, seq, &offset)) != NULL) {
1580 if (offset < skb->len) {
1581 int used;
1582 size_t len;
1583
1584 len = skb->len - offset;
1585 /* Stop reading if we hit a patch of urgent data */
1586 if (unlikely(tp->urg_data)) {
1587 u32 urg_offset = tp->urg_seq - seq;
1588 if (urg_offset < len)
1589 len = urg_offset;
1590 if (!len)
1591 break;
1592 }
1593 used = recv_actor(desc, skb, offset, len);
1594 if (used <= 0) {
1595 if (!copied)
1596 copied = used;
1597 break;
1598 }
1599 if (WARN_ON_ONCE(used > len))
1600 used = len;
1601 seq += used;
1602 copied += used;
1603 offset += used;
1604
1605 /* If recv_actor drops the lock (e.g. TCP splice
1606 * receive) the skb pointer might be invalid when
1607 * getting here: tcp_collapse might have deleted it
1608 * while aggregating skbs from the socket queue.
1609 */
1610 skb = tcp_recv_skb(sk, seq - 1, &offset);
1611 if (!skb)
1612 break;
1613 /* TCP coalescing might have appended data to the skb.
1614 * Try to splice more frags
1615 */
1616 if (offset + 1 != skb->len)
1617 continue;
1618 }
1619 if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) {
1620 tcp_eat_recv_skb(sk, skb);
1621 ++seq;
1622 break;
1623 }
1624 tcp_eat_recv_skb(sk, skb);
1625 if (!desc->count)
1626 break;
1627 WRITE_ONCE(tp->copied_seq, seq);
1628 }
1629 WRITE_ONCE(tp->copied_seq, seq);
1630
1631 tcp_rcv_space_adjust(sk);
1632
1633 /* Clean up data we have read: This will do ACK frames. */
1634 if (copied > 0) {
1635 tcp_recv_skb(sk, seq, &offset);
1636 tcp_cleanup_rbuf(sk, copied);
1637 }
1638 return copied;
1639}
1640EXPORT_SYMBOL(tcp_read_sock);
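
/* In-kernel users of tcp_read_sock() (the splice path above, strparser
 * based consumers, etc.) supply a recv_actor that consumes each skb range;
 * an actor returning zero or a negative value stops the walk, as handled
 * above.
 */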
1641
1642int tcp_read_skb(struct sock *sk, skb_read_actor_t recv_actor)
1643{
1644 struct sk_buff *skb;
1645 int copied = 0;
1646
1647 if (sk->sk_state == TCP_LISTEN)
1648 return -ENOTCONN;
1649
1650 while ((skb = skb_peek(&sk->sk_receive_queue)) != NULL) {
1651 u8 tcp_flags;
1652 int used;
1653
1654 __skb_unlink(skb, &sk->sk_receive_queue);
1655 WARN_ON_ONCE(!skb_set_owner_sk_safe(skb, sk));
1656 tcp_flags = TCP_SKB_CB(skb)->tcp_flags;
1657 used = recv_actor(sk, skb);
1658 if (used < 0) {
1659 if (!copied)
1660 copied = used;
1661 break;
1662 }
1663 copied += used;
1664
1665 if (tcp_flags & TCPHDR_FIN)
1666 break;
1667 }
1668 return copied;
1669}
1670EXPORT_SYMBOL(tcp_read_skb);
1671
1672void tcp_read_done(struct sock *sk, size_t len)
1673{
1674 struct tcp_sock *tp = tcp_sk(sk);
1675 u32 seq = tp->copied_seq;
1676 struct sk_buff *skb;
1677 size_t left;
1678 u32 offset;
1679
1680 if (sk->sk_state == TCP_LISTEN)
1681 return;
1682
1683 left = len;
1684 while (left && (skb = tcp_recv_skb(sk, seq, &offset)) != NULL) {
1685 int used;
1686
1687 used = min_t(size_t, skb->len - offset, left);
1688 seq += used;
1689 left -= used;
1690
1691 if (skb->len > offset + used)
1692 break;
1693
1694 if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) {
1695 tcp_eat_recv_skb(sk, skb);
1696 ++seq;
1697 break;
1698 }
1699 tcp_eat_recv_skb(sk, skb);
1700 }
1701 WRITE_ONCE(tp->copied_seq, seq);
1702
1703 tcp_rcv_space_adjust(sk);
1704
1705 /* Clean up data we have read: This will do ACK frames. */
1706 if (left != len)
1707 tcp_cleanup_rbuf(sk, len - left);
1708}
1709EXPORT_SYMBOL(tcp_read_done);
1710
1711int tcp_peek_len(struct socket *sock)
1712{
1713 return tcp_inq(sock->sk);
1714}
1715EXPORT_SYMBOL(tcp_peek_len);
1716
1717/* Make sure sk_rcvbuf is big enough to satisfy SO_RCVLOWAT hint */
1718int tcp_set_rcvlowat(struct sock *sk, int val)
1719{
1720 int space, cap;
1721
1722 if (sk->sk_userlocks & SOCK_RCVBUF_LOCK)
1723 cap = sk->sk_rcvbuf >> 1;
1724 else
1725 cap = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_rmem[2]) >> 1;
1726 val = min(val, cap);
1727 WRITE_ONCE(sk->sk_rcvlowat, val ? : 1);
1728
1729 /* Check if we need to signal EPOLLIN right now */
1730 tcp_data_ready(sk);
1731
1732 if (sk->sk_userlocks & SOCK_RCVBUF_LOCK)
1733 return 0;
1734
1735 space = tcp_space_from_win(sk, val);
1736 if (space > sk->sk_rcvbuf) {
1737 WRITE_ONCE(sk->sk_rcvbuf, space);
1738 WRITE_ONCE(tcp_sk(sk)->window_clamp, val);
1739 }
1740 return 0;
1741}
1742EXPORT_SYMBOL(tcp_set_rcvlowat);
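
/* Userspace sketch (illustrative only): SO_RCVLOWAT is the knob that lands
 * here.  Raising it makes poll()/recv() wait for a larger amount of queued
 * data before the socket is reported readable:
 *
 *	int lowat = 64 * 1024;
 *	setsockopt(fd, SOL_SOCKET, SO_RCVLOWAT, &lowat, sizeof(lowat));
 *
 * As shown above, sk_rcvbuf may be grown so that the requested watermark
 * can actually be buffered.
 */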
1743
1744void tcp_update_recv_tstamps(struct sk_buff *skb,
1745 struct scm_timestamping_internal *tss)
1746{
1747 if (skb->tstamp)
1748 tss->ts[0] = ktime_to_timespec64(skb->tstamp);
1749 else
1750 tss->ts[0] = (struct timespec64) {0};
1751
1752 if (skb_hwtstamps(skb)->hwtstamp)
1753 tss->ts[2] = ktime_to_timespec64(skb_hwtstamps(skb)->hwtstamp);
1754 else
1755 tss->ts[2] = (struct timespec64) {0};
1756}
1757
1758#ifdef CONFIG_MMU
1759static const struct vm_operations_struct tcp_vm_ops = {
1760};
1761
1762int tcp_mmap(struct file *file, struct socket *sock,
1763 struct vm_area_struct *vma)
1764{
1765 if (vma->vm_flags & (VM_WRITE | VM_EXEC))
1766 return -EPERM;
1767 vm_flags_clear(vma, VM_MAYWRITE | VM_MAYEXEC);
1768
1769 /* Instruct vm_insert_page() to not mmap_read_lock(mm) */
1770 vm_flags_set(vma, VM_MIXEDMAP);
1771
1772 vma->vm_ops = &tcp_vm_ops;
1773 return 0;
1774}
1775EXPORT_SYMBOL(tcp_mmap);
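
/* Userspace sketch (illustrative only): receive zerocopy is a two-step
 * operation.  First mmap() a page-aligned, read-only window on the socket
 * (served by tcp_mmap() above), then repeatedly ask the kernel to map
 * received payload pages into that window via getsockopt():
 *
 *	void *addr = mmap(NULL, chunk, PROT_READ, MAP_SHARED, fd, 0);
 *	struct tcp_zerocopy_receive zc = {
 *		.address = (__u64)(unsigned long)addr,
 *		.length  = chunk,
 *	};
 *	socklen_t zlen = sizeof(zc);
 *	getsockopt(fd, IPPROTO_TCP, TCP_ZEROCOPY_RECEIVE, &zc, &zlen);
 *	// zc.length bytes are now mapped; zc.recv_skip_hint bytes must be
 *	// read with a regular recv() before trying again.
 *
 * See tools/testing/selftests/net/tcp_mmap.c for a complete example.
 */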
1776
1777static skb_frag_t *skb_advance_to_frag(struct sk_buff *skb, u32 offset_skb,
1778 u32 *offset_frag)
1779{
1780 skb_frag_t *frag;
1781
1782 if (unlikely(offset_skb >= skb->len))
1783 return NULL;
1784
1785 offset_skb -= skb_headlen(skb);
1786 if ((int)offset_skb < 0 || skb_has_frag_list(skb))
1787 return NULL;
1788
1789 frag = skb_shinfo(skb)->frags;
1790 while (offset_skb) {
1791 if (skb_frag_size(frag) > offset_skb) {
1792 *offset_frag = offset_skb;
1793 return frag;
1794 }
1795 offset_skb -= skb_frag_size(frag);
1796 ++frag;
1797 }
1798 *offset_frag = 0;
1799 return frag;
1800}
1801
1802static bool can_map_frag(const skb_frag_t *frag)
1803{
1804 struct page *page;
1805
1806 if (skb_frag_size(frag) != PAGE_SIZE || skb_frag_off(frag))
1807 return false;
1808
1809 page = skb_frag_page(frag);
1810
1811 if (PageCompound(page) || page->mapping)
1812 return false;
1813
1814 return true;
1815}
1816
1817static int find_next_mappable_frag(const skb_frag_t *frag,
1818 int remaining_in_skb)
1819{
1820 int offset = 0;
1821
1822 if (likely(can_map_frag(frag)))
1823 return 0;
1824
1825 while (offset < remaining_in_skb && !can_map_frag(frag)) {
1826 offset += skb_frag_size(frag);
1827 ++frag;
1828 }
1829 return offset;
1830}
1831
1832static void tcp_zerocopy_set_hint_for_skb(struct sock *sk,
1833 struct tcp_zerocopy_receive *zc,
1834 struct sk_buff *skb, u32 offset)
1835{
1836 u32 frag_offset, partial_frag_remainder = 0;
1837 int mappable_offset;
1838 skb_frag_t *frag;
1839
1840 /* Worst case: skip to the next skb. Try to improve on this case below. */
1841 zc->recv_skip_hint = skb->len - offset;
1842
1843 /* Find the frag containing this offset (and how far into that frag) */
1844 frag = skb_advance_to_frag(skb, offset, &frag_offset);
1845 if (!frag)
1846 return;
1847
1848 if (frag_offset) {
1849 struct skb_shared_info *info = skb_shinfo(skb);
1850
1851 /* We read part of the last frag; must recvmsg() the rest of the skb. */
1852 if (frag == &info->frags[info->nr_frags - 1])
1853 return;
1854
1855 /* Else, we must at least read the remainder in this frag. */
1856 partial_frag_remainder = skb_frag_size(frag) - frag_offset;
1857 zc->recv_skip_hint -= partial_frag_remainder;
1858 ++frag;
1859 }
1860
1861 /* partial_frag_remainder: If part way through a frag, must read rest.
1862 * mappable_offset: Bytes till next mappable frag, *not* counting bytes
1863 * in partial_frag_remainder.
1864 */
1865 mappable_offset = find_next_mappable_frag(frag, zc->recv_skip_hint);
1866 zc->recv_skip_hint = mappable_offset + partial_frag_remainder;
1867}
1868
1869static int tcp_recvmsg_locked(struct sock *sk, struct msghdr *msg, size_t len,
1870 int flags, struct scm_timestamping_internal *tss,
1871 int *cmsg_flags);
1872static int receive_fallback_to_copy(struct sock *sk,
1873 struct tcp_zerocopy_receive *zc, int inq,
1874 struct scm_timestamping_internal *tss)
1875{
1876 unsigned long copy_address = (unsigned long)zc->copybuf_address;
1877 struct msghdr msg = {};
1878 int err;
1879
1880 zc->length = 0;
1881 zc->recv_skip_hint = 0;
1882
1883 if (copy_address != zc->copybuf_address)
1884 return -EINVAL;
1885
1886 err = import_ubuf(ITER_DEST, (void __user *)copy_address, inq,
1887 &msg.msg_iter);
1888 if (err)
1889 return err;
1890
1891 err = tcp_recvmsg_locked(sk, &msg, inq, MSG_DONTWAIT,
1892 tss, &zc->msg_flags);
1893 if (err < 0)
1894 return err;
1895
1896 zc->copybuf_len = err;
1897 if (likely(zc->copybuf_len)) {
1898 struct sk_buff *skb;
1899 u32 offset;
1900
1901 skb = tcp_recv_skb(sk, tcp_sk(sk)->copied_seq, &offset);
1902 if (skb)
1903 tcp_zerocopy_set_hint_for_skb(sk, zc, skb, offset);
1904 }
1905 return 0;
1906}
1907
1908static int tcp_copy_straggler_data(struct tcp_zerocopy_receive *zc,
1909 struct sk_buff *skb, u32 copylen,
1910 u32 *offset, u32 *seq)
1911{
1912 unsigned long copy_address = (unsigned long)zc->copybuf_address;
1913 struct msghdr msg = {};
1914 int err;
1915
1916 if (copy_address != zc->copybuf_address)
1917 return -EINVAL;
1918
1919 err = import_ubuf(ITER_DEST, (void __user *)copy_address, copylen,
1920 &msg.msg_iter);
1921 if (err)
1922 return err;
1923 err = skb_copy_datagram_msg(skb, *offset, &msg, copylen);
1924 if (err)
1925 return err;
1926 zc->recv_skip_hint -= copylen;
1927 *offset += copylen;
1928 *seq += copylen;
1929 return (__s32)copylen;
1930}
1931
1932static int tcp_zc_handle_leftover(struct tcp_zerocopy_receive *zc,
1933 struct sock *sk,
1934 struct sk_buff *skb,
1935 u32 *seq,
1936 s32 copybuf_len,
1937 struct scm_timestamping_internal *tss)
1938{
1939 u32 offset, copylen = min_t(u32, copybuf_len, zc->recv_skip_hint);
1940
1941 if (!copylen)
1942 return 0;
1943 /* skb is null if inq < PAGE_SIZE. */
1944 if (skb) {
1945 offset = *seq - TCP_SKB_CB(skb)->seq;
1946 } else {
1947 skb = tcp_recv_skb(sk, *seq, &offset);
1948 if (TCP_SKB_CB(skb)->has_rxtstamp) {
1949 tcp_update_recv_tstamps(skb, tss);
1950 zc->msg_flags |= TCP_CMSG_TS;
1951 }
1952 }
1953
1954 zc->copybuf_len = tcp_copy_straggler_data(zc, skb, copylen, &offset,
1955 seq);
1956 return zc->copybuf_len < 0 ? 0 : copylen;
1957}
1958
1959static int tcp_zerocopy_vm_insert_batch_error(struct vm_area_struct *vma,
1960 struct page **pending_pages,
1961 unsigned long pages_remaining,
1962 unsigned long *address,
1963 u32 *length,
1964 u32 *seq,
1965 struct tcp_zerocopy_receive *zc,
1966 u32 total_bytes_to_map,
1967 int err)
1968{
1969 /* At least one page did not map. Try zapping if we skipped earlier. */
1970 if (err == -EBUSY &&
1971 zc->flags & TCP_RECEIVE_ZEROCOPY_FLAG_TLB_CLEAN_HINT) {
1972 u32 maybe_zap_len;
1973
1974 maybe_zap_len = total_bytes_to_map - /* All bytes to map */
1975 *length + /* Mapped or pending */
1976 (pages_remaining * PAGE_SIZE); /* Failed map. */
1977 zap_page_range_single(vma, *address, maybe_zap_len, NULL);
1978 err = 0;
1979 }
1980
1981 if (!err) {
1982 unsigned long leftover_pages = pages_remaining;
1983 int bytes_mapped;
1984
1985 /* We called zap_page_range_single, try to reinsert. */
1986 err = vm_insert_pages(vma, *address,
1987 pending_pages,
1988 &pages_remaining);
1989 bytes_mapped = PAGE_SIZE * (leftover_pages - pages_remaining);
1990 *seq += bytes_mapped;
1991 *address += bytes_mapped;
1992 }
1993 if (err) {
1994 /* Either we were unable to zap, OR we zapped, retried an
1995 * insert, and still had an issue. Either way, pages_remaining
1996 * is the number of pages we were unable to map, and we unroll
1997 * some state we speculatively touched before.
1998 */
1999 const int bytes_not_mapped = PAGE_SIZE * pages_remaining;
2000
2001 *length -= bytes_not_mapped;
2002 zc->recv_skip_hint += bytes_not_mapped;
2003 }
2004 return err;
2005}
2006
2007static int tcp_zerocopy_vm_insert_batch(struct vm_area_struct *vma,
2008 struct page **pages,
2009 unsigned int pages_to_map,
2010 unsigned long *address,
2011 u32 *length,
2012 u32 *seq,
2013 struct tcp_zerocopy_receive *zc,
2014 u32 total_bytes_to_map)
2015{
2016 unsigned long pages_remaining = pages_to_map;
2017 unsigned int pages_mapped;
2018 unsigned int bytes_mapped;
2019 int err;
2020
2021 err = vm_insert_pages(vma, *address, pages, &pages_remaining);
2022 pages_mapped = pages_to_map - (unsigned int)pages_remaining;
2023 bytes_mapped = PAGE_SIZE * pages_mapped;
2024 /* Even if vm_insert_pages fails, it may have partially succeeded in
2025 * mapping (some but not all of the pages).
2026 */
2027 *seq += bytes_mapped;
2028 *address += bytes_mapped;
2029
2030 if (likely(!err))
2031 return 0;
2032
2033 /* Error: maybe zap and retry + rollback state for failed inserts. */
2034 return tcp_zerocopy_vm_insert_batch_error(vma, pages + pages_mapped,
2035 pages_remaining, address, length, seq, zc, total_bytes_to_map,
2036 err);
2037}
2038
2039#define TCP_VALID_ZC_MSG_FLAGS (TCP_CMSG_TS)
2040static void tcp_zc_finalize_rx_tstamp(struct sock *sk,
2041 struct tcp_zerocopy_receive *zc,
2042 struct scm_timestamping_internal *tss)
2043{
2044 unsigned long msg_control_addr;
2045 struct msghdr cmsg_dummy;
2046
2047 msg_control_addr = (unsigned long)zc->msg_control;
2048 cmsg_dummy.msg_control_user = (void __user *)msg_control_addr;
2049 cmsg_dummy.msg_controllen =
2050 (__kernel_size_t)zc->msg_controllen;
2051 cmsg_dummy.msg_flags = in_compat_syscall()
2052 ? MSG_CMSG_COMPAT : 0;
2053 cmsg_dummy.msg_control_is_user = true;
2054 zc->msg_flags = 0;
2055 if (zc->msg_control == msg_control_addr &&
2056 zc->msg_controllen == cmsg_dummy.msg_controllen) {
2057 tcp_recv_timestamp(&cmsg_dummy, sk, tss);
2058 zc->msg_control = (__u64)
2059 ((uintptr_t)cmsg_dummy.msg_control_user);
2060 zc->msg_controllen =
2061 (__u64)cmsg_dummy.msg_controllen;
2062 zc->msg_flags = (__u32)cmsg_dummy.msg_flags;
2063 }
2064}
2065
2066static struct vm_area_struct *find_tcp_vma(struct mm_struct *mm,
2067 unsigned long address,
2068 bool *mmap_locked)
2069{
2070 struct vm_area_struct *vma = lock_vma_under_rcu(mm, address);
2071
2072 if (vma) {
2073 if (vma->vm_ops != &tcp_vm_ops) {
2074 vma_end_read(vma);
2075 return NULL;
2076 }
2077 *mmap_locked = false;
2078 return vma;
2079 }
2080
2081 mmap_read_lock(mm);
2082 vma = vma_lookup(mm, address);
2083 if (!vma || vma->vm_ops != &tcp_vm_ops) {
2084 mmap_read_unlock(mm);
2085 return NULL;
2086 }
2087 *mmap_locked = true;
2088 return vma;
2089}
2090
2091#define TCP_ZEROCOPY_PAGE_BATCH_SIZE 32
2092static int tcp_zerocopy_receive(struct sock *sk,
2093 struct tcp_zerocopy_receive *zc,
2094 struct scm_timestamping_internal *tss)
2095{
2096 u32 length = 0, offset, vma_len, avail_len, copylen = 0;
2097 unsigned long address = (unsigned long)zc->address;
2098 struct page *pages[TCP_ZEROCOPY_PAGE_BATCH_SIZE];
2099 s32 copybuf_len = zc->copybuf_len;
2100 struct tcp_sock *tp = tcp_sk(sk);
2101 const skb_frag_t *frags = NULL;
2102 unsigned int pages_to_map = 0;
2103 struct vm_area_struct *vma;
2104 struct sk_buff *skb = NULL;
2105 u32 seq = tp->copied_seq;
2106 u32 total_bytes_to_map;
2107 int inq = tcp_inq(sk);
2108 bool mmap_locked;
2109 int ret;
2110
2111 zc->copybuf_len = 0;
2112 zc->msg_flags = 0;
2113
2114 if (address & (PAGE_SIZE - 1) || address != zc->address)
2115 return -EINVAL;
2116
2117 if (sk->sk_state == TCP_LISTEN)
2118 return -ENOTCONN;
2119
2120 sock_rps_record_flow(sk);
2121
2122 if (inq && inq <= copybuf_len)
2123 return receive_fallback_to_copy(sk, zc, inq, tss);
2124
2125 if (inq < PAGE_SIZE) {
2126 zc->length = 0;
2127 zc->recv_skip_hint = inq;
2128 if (!inq && sock_flag(sk, SOCK_DONE))
2129 return -EIO;
2130 return 0;
2131 }
2132
2133 vma = find_tcp_vma(current->mm, address, &mmap_locked);
2134 if (!vma)
2135 return -EINVAL;
2136
2137 vma_len = min_t(unsigned long, zc->length, vma->vm_end - address);
2138 avail_len = min_t(u32, vma_len, inq);
2139 total_bytes_to_map = avail_len & ~(PAGE_SIZE - 1);
2140 if (total_bytes_to_map) {
2141 if (!(zc->flags & TCP_RECEIVE_ZEROCOPY_FLAG_TLB_CLEAN_HINT))
2142 zap_page_range_single(vma, address, total_bytes_to_map,
2143 NULL);
2144 zc->length = total_bytes_to_map;
2145 zc->recv_skip_hint = 0;
2146 } else {
2147 zc->length = avail_len;
2148 zc->recv_skip_hint = avail_len;
2149 }
2150 ret = 0;
2151 while (length + PAGE_SIZE <= zc->length) {
2152 int mappable_offset;
2153 struct page *page;
2154
2155 if (zc->recv_skip_hint < PAGE_SIZE) {
2156 u32 offset_frag;
2157
2158 if (skb) {
2159 if (zc->recv_skip_hint > 0)
2160 break;
2161 skb = skb->next;
2162 offset = seq - TCP_SKB_CB(skb)->seq;
2163 } else {
2164 skb = tcp_recv_skb(sk, seq, &offset);
2165 }
2166
2167 if (!skb_frags_readable(skb))
2168 break;
2169
2170 if (TCP_SKB_CB(skb)->has_rxtstamp) {
2171 tcp_update_recv_tstamps(skb, tss);
2172 zc->msg_flags |= TCP_CMSG_TS;
2173 }
2174 zc->recv_skip_hint = skb->len - offset;
2175 frags = skb_advance_to_frag(skb, offset, &offset_frag);
2176 if (!frags || offset_frag)
2177 break;
2178 }
2179
2180 mappable_offset = find_next_mappable_frag(frags,
2181 zc->recv_skip_hint);
2182 if (mappable_offset) {
2183 zc->recv_skip_hint = mappable_offset;
2184 break;
2185 }
2186 page = skb_frag_page(frags);
2187 if (WARN_ON_ONCE(!page))
2188 break;
2189
2190 prefetchw(page);
2191 pages[pages_to_map++] = page;
2192 length += PAGE_SIZE;
2193 zc->recv_skip_hint -= PAGE_SIZE;
2194 frags++;
2195 if (pages_to_map == TCP_ZEROCOPY_PAGE_BATCH_SIZE ||
2196 zc->recv_skip_hint < PAGE_SIZE) {
2197 /* Either full batch, or we're about to go to next skb
2198 * (and we cannot unroll failed ops across skbs).
2199 */
2200 ret = tcp_zerocopy_vm_insert_batch(vma, pages,
2201 pages_to_map,
2202 &address, &length,
2203 &seq, zc,
2204 total_bytes_to_map);
2205 if (ret)
2206 goto out;
2207 pages_to_map = 0;
2208 }
2209 }
2210 if (pages_to_map) {
2211 ret = tcp_zerocopy_vm_insert_batch(vma, pages, pages_to_map,
2212 &address, &length, &seq,
2213 zc, total_bytes_to_map);
2214 }
2215out:
2216 if (mmap_locked)
2217 mmap_read_unlock(current->mm);
2218 else
2219 vma_end_read(vma);
2220 /* Try to copy straggler data. */
2221 if (!ret)
2222 copylen = tcp_zc_handle_leftover(zc, sk, skb, &seq, copybuf_len, tss);
2223
2224 if (length + copylen) {
2225 WRITE_ONCE(tp->copied_seq, seq);
2226 tcp_rcv_space_adjust(sk);
2227
2228 /* Clean up data we have read: This will do ACK frames. */
2229 tcp_recv_skb(sk, seq, &offset);
2230 tcp_cleanup_rbuf(sk, length + copylen);
2231 ret = 0;
2232 if (length == zc->length)
2233 zc->recv_skip_hint = 0;
2234 } else {
2235 if (!zc->recv_skip_hint && sock_flag(sk, SOCK_DONE))
2236 ret = -EIO;
2237 }
2238 zc->length = length;
2239 return ret;
2240}
2241#endif
2242
2243/* Similar to __sock_recv_timestamp, but does not require an skb */
2244void tcp_recv_timestamp(struct msghdr *msg, const struct sock *sk,
2245 struct scm_timestamping_internal *tss)
2246{
2247 int new_tstamp = sock_flag(sk, SOCK_TSTAMP_NEW);
2248 u32 tsflags = READ_ONCE(sk->sk_tsflags);
2249 bool has_timestamping = false;
2250
2251 if (tss->ts[0].tv_sec || tss->ts[0].tv_nsec) {
2252 if (sock_flag(sk, SOCK_RCVTSTAMP)) {
2253 if (sock_flag(sk, SOCK_RCVTSTAMPNS)) {
2254 if (new_tstamp) {
2255 struct __kernel_timespec kts = {
2256 .tv_sec = tss->ts[0].tv_sec,
2257 .tv_nsec = tss->ts[0].tv_nsec,
2258 };
2259 put_cmsg(msg, SOL_SOCKET, SO_TIMESTAMPNS_NEW,
2260 sizeof(kts), &kts);
2261 } else {
2262 struct __kernel_old_timespec ts_old = {
2263 .tv_sec = tss->ts[0].tv_sec,
2264 .tv_nsec = tss->ts[0].tv_nsec,
2265 };
2266 put_cmsg(msg, SOL_SOCKET, SO_TIMESTAMPNS_OLD,
2267 sizeof(ts_old), &ts_old);
2268 }
2269 } else {
2270 if (new_tstamp) {
2271 struct __kernel_sock_timeval stv = {
2272 .tv_sec = tss->ts[0].tv_sec,
2273 .tv_usec = tss->ts[0].tv_nsec / 1000,
2274 };
2275 put_cmsg(msg, SOL_SOCKET, SO_TIMESTAMP_NEW,
2276 sizeof(stv), &stv);
2277 } else {
2278 struct __kernel_old_timeval tv = {
2279 .tv_sec = tss->ts[0].tv_sec,
2280 .tv_usec = tss->ts[0].tv_nsec / 1000,
2281 };
2282 put_cmsg(msg, SOL_SOCKET, SO_TIMESTAMP_OLD,
2283 sizeof(tv), &tv);
2284 }
2285 }
2286 }
2287
2288 if (tsflags & SOF_TIMESTAMPING_SOFTWARE &&
2289 (tsflags & SOF_TIMESTAMPING_RX_SOFTWARE ||
2290 !(tsflags & SOF_TIMESTAMPING_OPT_RX_FILTER)))
2291 has_timestamping = true;
2292 else
2293 tss->ts[0] = (struct timespec64) {0};
2294 }
2295
2296 if (tss->ts[2].tv_sec || tss->ts[2].tv_nsec) {
2297 if (tsflags & SOF_TIMESTAMPING_RAW_HARDWARE &&
2298 (tsflags & SOF_TIMESTAMPING_RX_HARDWARE ||
2299 !(tsflags & SOF_TIMESTAMPING_OPT_RX_FILTER)))
2300 has_timestamping = true;
2301 else
2302 tss->ts[2] = (struct timespec64) {0};
2303 }
2304
2305 if (has_timestamping) {
2306 tss->ts[1] = (struct timespec64) {0};
2307 if (sock_flag(sk, SOCK_TSTAMP_NEW))
2308 put_cmsg_scm_timestamping64(msg, tss);
2309 else
2310 put_cmsg_scm_timestamping(msg, tss);
2311 }
2312}
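
/* Userspace sketch (illustrative only): the control messages emitted above
 * are what a receiver sees after enabling RX timestamping:
 *
 *	int val = SOF_TIMESTAMPING_RX_SOFTWARE | SOF_TIMESTAMPING_SOFTWARE;
 *	setsockopt(fd, SOL_SOCKET, SO_TIMESTAMPING, &val, sizeof(val));
 *	// recvmsg() then carries an SCM_TIMESTAMPING control message holding
 *	// the struct scm_timestamping triple filled in as described above.
 */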
2313
2314static int tcp_inq_hint(struct sock *sk)
2315{
2316 const struct tcp_sock *tp = tcp_sk(sk);
2317 u32 copied_seq = READ_ONCE(tp->copied_seq);
2318 u32 rcv_nxt = READ_ONCE(tp->rcv_nxt);
2319 int inq;
2320
2321 inq = rcv_nxt - copied_seq;
2322 if (unlikely(inq < 0 || copied_seq != READ_ONCE(tp->copied_seq))) {
2323 lock_sock(sk);
2324 inq = tp->rcv_nxt - tp->copied_seq;
2325 release_sock(sk);
2326 }
2327 /* After receiving a FIN, tell user space to continue reading
2328 * by returning a non-zero inq.
2329 */
2330 if (inq == 0 && sock_flag(sk, SOCK_DONE))
2331 inq = 1;
2332 return inq;
2333}
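
/* Userspace sketch (illustrative only): tcp_inq_hint() feeds the TCP_INQ
 * control message, which lets a receiver learn how much more data is
 * already queued without issuing an extra SIOCINQ ioctl:
 *
 *	int one = 1;
 *	setsockopt(fd, IPPROTO_TCP, TCP_INQ, &one, sizeof(one));
 *	// each recvmsg() now includes a cmsg (level SOL_TCP, type TCP_CM_INQ)
 *	// holding the remaining in-queue byte count.
 */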
2334
2335/* batch __xa_alloc() calls and reduce xa_lock()/xa_unlock() overhead. */
2336struct tcp_xa_pool {
2337 u8 max; /* max <= MAX_SKB_FRAGS */
2338 u8 idx; /* idx <= max */
2339 __u32 tokens[MAX_SKB_FRAGS];
2340 netmem_ref netmems[MAX_SKB_FRAGS];
2341};
2342
2343static void tcp_xa_pool_commit_locked(struct sock *sk, struct tcp_xa_pool *p)
2344{
2345 int i;
2346
2347 /* Commit part that has been copied to user space. */
2348 for (i = 0; i < p->idx; i++)
2349 __xa_cmpxchg(&sk->sk_user_frags, p->tokens[i], XA_ZERO_ENTRY,
2350 (__force void *)p->netmems[i], GFP_KERNEL);
2351 /* Rollback what has been pre-allocated and is no longer needed. */
2352 for (; i < p->max; i++)
2353 __xa_erase(&sk->sk_user_frags, p->tokens[i]);
2354
2355 p->max = 0;
2356 p->idx = 0;
2357}
2358
2359static void tcp_xa_pool_commit(struct sock *sk, struct tcp_xa_pool *p)
2360{
2361 if (!p->max)
2362 return;
2363
2364 xa_lock_bh(&sk->sk_user_frags);
2365
2366 tcp_xa_pool_commit_locked(sk, p);
2367
2368 xa_unlock_bh(&sk->sk_user_frags);
2369}
2370
2371static int tcp_xa_pool_refill(struct sock *sk, struct tcp_xa_pool *p,
2372 unsigned int max_frags)
2373{
2374 int err, k;
2375
2376 if (p->idx < p->max)
2377 return 0;
2378
2379 xa_lock_bh(&sk->sk_user_frags);
2380
2381 tcp_xa_pool_commit_locked(sk, p);
2382
2383 for (k = 0; k < max_frags; k++) {
2384 err = __xa_alloc(&sk->sk_user_frags, &p->tokens[k],
2385 XA_ZERO_ENTRY, xa_limit_31b, GFP_KERNEL);
2386 if (err)
2387 break;
2388 }
2389
2390 xa_unlock_bh(&sk->sk_user_frags);
2391
2392 p->max = k;
2393 p->idx = 0;
2394 return k ? 0 : err;
2395}
2396
2397/* On error, returns a negative errno. On success, returns the number of bytes
2398 * sent to the user. May not consume all of @remaining_len.
2399 */
2400static int tcp_recvmsg_dmabuf(struct sock *sk, const struct sk_buff *skb,
2401 unsigned int offset, struct msghdr *msg,
2402 int remaining_len)
2403{
2404 struct dmabuf_cmsg dmabuf_cmsg = { 0 };
2405 struct tcp_xa_pool tcp_xa_pool;
2406 unsigned int start;
2407 int i, copy, n;
2408 int sent = 0;
2409 int err = 0;
2410
2411 tcp_xa_pool.max = 0;
2412 tcp_xa_pool.idx = 0;
2413 do {
2414 start = skb_headlen(skb);
2415
2416 if (skb_frags_readable(skb)) {
2417 err = -ENODEV;
2418 goto out;
2419 }
2420
2421 /* Copy header. */
2422 copy = start - offset;
2423 if (copy > 0) {
2424 copy = min(copy, remaining_len);
2425
2426 n = copy_to_iter(skb->data + offset, copy,
2427 &msg->msg_iter);
2428 if (n != copy) {
2429 err = -EFAULT;
2430 goto out;
2431 }
2432
2433 offset += copy;
2434 remaining_len -= copy;
2435
2436 /* First a dmabuf_cmsg for # bytes copied to user
2437 * buffer.
2438 */
2439 memset(&dmabuf_cmsg, 0, sizeof(dmabuf_cmsg));
2440 dmabuf_cmsg.frag_size = copy;
2441 err = put_cmsg_notrunc(msg, SOL_SOCKET,
2442 SO_DEVMEM_LINEAR,
2443 sizeof(dmabuf_cmsg),
2444 &dmabuf_cmsg);
2445 if (err)
2446 goto out;
2447
2448 sent += copy;
2449
2450 if (remaining_len == 0)
2451 goto out;
2452 }
2453
2454 /* After that, send information about the dmabuf pages through a
2455 * sequence of cmsgs.
2456 */
2457 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2458 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2459 struct net_iov *niov;
2460 u64 frag_offset;
2461 int end;
2462
2463 /* !skb_frags_readable() should indicate that ALL the
2464 * frags in this skb are dmabuf net_iovs. We're checking
2465 * for that flag above, but also check individual frags
2466 * here. If the tcp stack is not setting
2467 * skb_frags_readable() correctly, we still don't want
2468 * to crash here.
2469 */
2470 if (!skb_frag_net_iov(frag)) {
2471 net_err_ratelimited("Found non-dmabuf skb with net_iov");
2472 err = -ENODEV;
2473 goto out;
2474 }
2475
2476 niov = skb_frag_net_iov(frag);
2477 end = start + skb_frag_size(frag);
2478 copy = end - offset;
2479
2480 if (copy > 0) {
2481 copy = min(copy, remaining_len);
2482
2483 frag_offset = net_iov_virtual_addr(niov) +
2484 skb_frag_off(frag) + offset -
2485 start;
2486 dmabuf_cmsg.frag_offset = frag_offset;
2487 dmabuf_cmsg.frag_size = copy;
2488 err = tcp_xa_pool_refill(sk, &tcp_xa_pool,
2489 skb_shinfo(skb)->nr_frags - i);
2490 if (err)
2491 goto out;
2492
2493 /* Will perform the exchange later */
2494 dmabuf_cmsg.frag_token = tcp_xa_pool.tokens[tcp_xa_pool.idx];
2495 dmabuf_cmsg.dmabuf_id = net_iov_binding_id(niov);
2496
2497 offset += copy;
2498 remaining_len -= copy;
2499
2500 err = put_cmsg_notrunc(msg, SOL_SOCKET,
2501 SO_DEVMEM_DMABUF,
2502 sizeof(dmabuf_cmsg),
2503 &dmabuf_cmsg);
2504 if (err)
2505 goto out;
2506
2507 atomic_long_inc(&niov->pp_ref_count);
2508 tcp_xa_pool.netmems[tcp_xa_pool.idx++] = skb_frag_netmem(frag);
2509
2510 sent += copy;
2511
2512 if (remaining_len == 0)
2513 goto out;
2514 }
2515 start = end;
2516 }
2517
2518 tcp_xa_pool_commit(sk, &tcp_xa_pool);
2519 if (!remaining_len)
2520 goto out;
2521
2522 /* If remaining_len is not satisfied yet, we need to go to the
2523 * next skb in the frag_list (or the next queued skb) to satisfy it.
2524 */
2525 skb = skb_shinfo(skb)->frag_list ?: skb->next;
2526
2527 offset = offset - start;
2528 } while (skb);
2529
2530 if (remaining_len) {
2531 err = -EFAULT;
2532 goto out;
2533 }
2534
2535out:
2536 tcp_xa_pool_commit(sk, &tcp_xa_pool);
2537 if (!sent)
2538 sent = err;
2539
2540 return sent;
2541}
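
/* Note (illustrative, not a complete API description): this path runs when
 * userspace passes MSG_SOCK_DEVMEM to recvmsg() on a socket whose payload
 * landed in device memory (dmabuf).  Linear bytes are described by
 * SO_DEVMEM_LINEAR cmsgs and copied, while frag payload is described by
 * SO_DEVMEM_DMABUF cmsgs carrying tokens; the receiver is expected to
 * return those tokens (in current kernels via the SO_DEVMEM_DONTNEED
 * socket option) once it is done with the fragments.
 */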
2542
2543/*
2544 * This routine copies from a sock struct into the user buffer.
2545 *
2546 * Technical note: in 2.3 we work on a _locked_ socket, so that
2547 * tricks with *seq access order and skb->users are not required.
2548 * Probably, the code can easily be improved even more.
2549 */
2550
2551static int tcp_recvmsg_locked(struct sock *sk, struct msghdr *msg, size_t len,
2552 int flags, struct scm_timestamping_internal *tss,
2553 int *cmsg_flags)
2554{
2555 struct tcp_sock *tp = tcp_sk(sk);
2556 int last_copied_dmabuf = -1; /* uninitialized */
2557 int copied = 0;
2558 u32 peek_seq;
2559 u32 *seq;
2560 unsigned long used;
2561 int err;
2562 int target; /* Read at least this many bytes */
2563 long timeo;
2564 struct sk_buff *skb, *last;
2565 u32 peek_offset = 0;
2566 u32 urg_hole = 0;
2567
2568 err = -ENOTCONN;
2569 if (sk->sk_state == TCP_LISTEN)
2570 goto out;
2571
2572 if (tp->recvmsg_inq) {
2573 *cmsg_flags = TCP_CMSG_INQ;
2574 msg->msg_get_inq = 1;
2575 }
2576 timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
2577
2578 /* Urgent data needs to be handled specially. */
2579 if (flags & MSG_OOB)
2580 goto recv_urg;
2581
2582 if (unlikely(tp->repair)) {
2583 err = -EPERM;
2584 if (!(flags & MSG_PEEK))
2585 goto out;
2586
2587 if (tp->repair_queue == TCP_SEND_QUEUE)
2588 goto recv_sndq;
2589
2590 err = -EINVAL;
2591 if (tp->repair_queue == TCP_NO_QUEUE)
2592 goto out;
2593
2594 /* 'common' recv queue MSG_PEEK-ing */
2595 }
2596
2597 seq = &tp->copied_seq;
2598 if (flags & MSG_PEEK) {
2599 peek_offset = max(sk_peek_offset(sk, flags), 0);
2600 peek_seq = tp->copied_seq + peek_offset;
2601 seq = &peek_seq;
2602 }
2603
2604 target = sock_rcvlowat(sk, flags & MSG_WAITALL, len);
2605
2606 do {
2607 u32 offset;
2608
2609 /* Are we at urgent data? Stop if we have read anything or have SIGURG pending. */
2610 if (unlikely(tp->urg_data) && tp->urg_seq == *seq) {
2611 if (copied)
2612 break;
2613 if (signal_pending(current)) {
2614 copied = timeo ? sock_intr_errno(timeo) : -EAGAIN;
2615 break;
2616 }
2617 }
2618
2619 /* Next get a buffer. */
2620
2621 last = skb_peek_tail(&sk->sk_receive_queue);
2622 skb_queue_walk(&sk->sk_receive_queue, skb) {
2623 last = skb;
2624 /* Now that we have two receive queues this
2625 * shouldn't happen.
2626 */
2627 if (WARN(before(*seq, TCP_SKB_CB(skb)->seq),
2628 "TCP recvmsg seq # bug: copied %X, seq %X, rcvnxt %X, fl %X\n",
2629 *seq, TCP_SKB_CB(skb)->seq, tp->rcv_nxt,
2630 flags))
2631 break;
2632
2633 offset = *seq - TCP_SKB_CB(skb)->seq;
2634 if (unlikely(TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN)) {
2635 pr_err_once("%s: found a SYN, please report !\n", __func__);
2636 offset--;
2637 }
2638 if (offset < skb->len)
2639 goto found_ok_skb;
2640 if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN)
2641 goto found_fin_ok;
2642 WARN(!(flags & MSG_PEEK),
2643 "TCP recvmsg seq # bug 2: copied %X, seq %X, rcvnxt %X, fl %X\n",
2644 *seq, TCP_SKB_CB(skb)->seq, tp->rcv_nxt, flags);
2645 }
2646
2647		/* Well, if we have backlog, try to process it now. */
2648
2649 if (copied >= target && !READ_ONCE(sk->sk_backlog.tail))
2650 break;
2651
2652 if (copied) {
2653 if (!timeo ||
2654 sk->sk_err ||
2655 sk->sk_state == TCP_CLOSE ||
2656 (sk->sk_shutdown & RCV_SHUTDOWN) ||
2657 signal_pending(current))
2658 break;
2659 } else {
2660 if (sock_flag(sk, SOCK_DONE))
2661 break;
2662
2663 if (sk->sk_err) {
2664 copied = sock_error(sk);
2665 break;
2666 }
2667
2668 if (sk->sk_shutdown & RCV_SHUTDOWN)
2669 break;
2670
2671 if (sk->sk_state == TCP_CLOSE) {
2672				/* This occurs when the user tries to read
2673				 * from a never-connected socket.
2674				 */
2675 copied = -ENOTCONN;
2676 break;
2677 }
2678
2679 if (!timeo) {
2680 copied = -EAGAIN;
2681 break;
2682 }
2683
2684 if (signal_pending(current)) {
2685 copied = sock_intr_errno(timeo);
2686 break;
2687 }
2688 }
2689
2690 if (copied >= target) {
2691 /* Do not sleep, just process backlog. */
2692 __sk_flush_backlog(sk);
2693 } else {
2694 tcp_cleanup_rbuf(sk, copied);
2695 err = sk_wait_data(sk, &timeo, last);
2696 if (err < 0) {
2697 err = copied ? : err;
2698 goto out;
2699 }
2700 }
2701
2702 if ((flags & MSG_PEEK) &&
2703 (peek_seq - peek_offset - copied - urg_hole != tp->copied_seq)) {
2704 net_dbg_ratelimited("TCP(%s:%d): Application bug, race in MSG_PEEK\n",
2705 current->comm,
2706 task_pid_nr(current));
2707 peek_seq = tp->copied_seq + peek_offset;
2708 }
2709 continue;
2710
2711found_ok_skb:
2712 /* Ok so how much can we use? */
2713 used = skb->len - offset;
2714 if (len < used)
2715 used = len;
2716
2717 /* Do we have urgent data here? */
2718 if (unlikely(tp->urg_data)) {
2719 u32 urg_offset = tp->urg_seq - *seq;
2720 if (urg_offset < used) {
2721 if (!urg_offset) {
2722 if (!sock_flag(sk, SOCK_URGINLINE)) {
2723 WRITE_ONCE(*seq, *seq + 1);
2724 urg_hole++;
2725 offset++;
2726 used--;
2727 if (!used)
2728 goto skip_copy;
2729 }
2730 } else
2731 used = urg_offset;
2732 }
2733 }
2734
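		/* Readable (host memory) and devmem (unreadable) skbs cannot be
		 * mixed within a single receive call: once the frag type changes,
		 * stop copying and let the application issue another recvmsg()
		 * for the remainder.
		 */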
2735 if (!(flags & MSG_TRUNC)) {
2736 if (last_copied_dmabuf != -1 &&
2737 last_copied_dmabuf != !skb_frags_readable(skb))
2738 break;
2739
2740 if (skb_frags_readable(skb)) {
2741 err = skb_copy_datagram_msg(skb, offset, msg,
2742 used);
2743 if (err) {
2744 /* Exception. Bailout! */
2745 if (!copied)
2746 copied = -EFAULT;
2747 break;
2748 }
2749 } else {
2750 if (!(flags & MSG_SOCK_DEVMEM)) {
2751 /* dmabuf skbs can only be received
2752 * with the MSG_SOCK_DEVMEM flag.
2753 */
2754 if (!copied)
2755 copied = -EFAULT;
2756
2757 break;
2758 }
2759
2760 err = tcp_recvmsg_dmabuf(sk, skb, offset, msg,
2761 used);
2762 if (err <= 0) {
2763 if (!copied)
2764 copied = -EFAULT;
2765
2766 break;
2767 }
2768 used = err;
2769 }
2770 }
2771
2772 last_copied_dmabuf = !skb_frags_readable(skb);
2773
2774 WRITE_ONCE(*seq, *seq + used);
2775 copied += used;
2776 len -= used;
2777 if (flags & MSG_PEEK)
2778 sk_peek_offset_fwd(sk, used);
2779 else
2780 sk_peek_offset_bwd(sk, used);
2781 tcp_rcv_space_adjust(sk);
2782
2783skip_copy:
2784 if (unlikely(tp->urg_data) && after(tp->copied_seq, tp->urg_seq)) {
2785 WRITE_ONCE(tp->urg_data, 0);
2786 tcp_fast_path_check(sk);
2787 }
2788
2789 if (TCP_SKB_CB(skb)->has_rxtstamp) {
2790 tcp_update_recv_tstamps(skb, tss);
2791 *cmsg_flags |= TCP_CMSG_TS;
2792 }
2793
2794 if (used + offset < skb->len)
2795 continue;
2796
2797 if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN)
2798 goto found_fin_ok;
2799 if (!(flags & MSG_PEEK))
2800 tcp_eat_recv_skb(sk, skb);
2801 continue;
2802
2803found_fin_ok:
2804 /* Process the FIN. */
2805 WRITE_ONCE(*seq, *seq + 1);
2806 if (!(flags & MSG_PEEK))
2807 tcp_eat_recv_skb(sk, skb);
2808 break;
2809 } while (len > 0);
2810
2811	/* According to UNIX98, msg_name/msg_namelen are ignored
2812	 * on a connected socket. I was just happy when I found this 8) --ANK
2813	 */
2814
2815 /* Clean up data we have read: This will do ACK frames. */
2816 tcp_cleanup_rbuf(sk, copied);
2817 return copied;
2818
2819out:
2820 return err;
2821
2822recv_urg:
2823 err = tcp_recv_urg(sk, msg, len, flags);
2824 goto out;
2825
2826recv_sndq:
2827 err = tcp_peek_sndq(sk, msg, len);
2828 goto out;
2829}
2830
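/* recvmsg() entry point: MSG_ERRQUEUE reads are diverted to inet_recv_error()
 * first; otherwise optionally busy-poll an empty, established socket, run
 * tcp_recvmsg_locked() under the socket lock, and after releasing the lock
 * emit any requested timestamp (SCM_TIMESTAMPING) and TCP_CM_INQ control
 * messages.
 */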
2831int tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int flags,
2832 int *addr_len)
2833{
2834 int cmsg_flags = 0, ret;
2835 struct scm_timestamping_internal tss;
2836
2837 if (unlikely(flags & MSG_ERRQUEUE))
2838 return inet_recv_error(sk, msg, len, addr_len);
2839
2840 if (sk_can_busy_loop(sk) &&
2841 skb_queue_empty_lockless(&sk->sk_receive_queue) &&
2842 sk->sk_state == TCP_ESTABLISHED)
2843 sk_busy_loop(sk, flags & MSG_DONTWAIT);
2844
2845 lock_sock(sk);
2846 ret = tcp_recvmsg_locked(sk, msg, len, flags, &tss, &cmsg_flags);
2847 release_sock(sk);
2848
2849 if ((cmsg_flags || msg->msg_get_inq) && ret >= 0) {
2850 if (cmsg_flags & TCP_CMSG_TS)
2851 tcp_recv_timestamp(msg, sk, &tss);
2852 if (msg->msg_get_inq) {
2853 msg->msg_inq = tcp_inq_hint(sk);
2854 if (cmsg_flags & TCP_CMSG_INQ)
2855 put_cmsg(msg, SOL_TCP, TCP_CM_INQ,
2856 sizeof(msg->msg_inq), &msg->msg_inq);
2857 }
2858 }
2859 return ret;
2860}
2861EXPORT_SYMBOL(tcp_recvmsg);
2862
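/* Move sk to @state. Besides storing the new state, this keeps the
 * CurrEstab/EstabResets SNMP counters in sync, notifies BPF sock_ops
 * programs that subscribed to state changes, and on a transition to
 * TCP_CLOSE unhashes the socket and releases its local port unless the
 * user explicitly bound it (SOCK_BINDPORT_LOCK).
 */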
2863void tcp_set_state(struct sock *sk, int state)
2864{
2865 int oldstate = sk->sk_state;
2866
2867	/* We defined a new enum for the TCP states that are exported to BPF
2868	 * so as not to force the internal TCP states to be frozen. The
2869 * following checks will detect if an internal state value ever
2870 * differs from the BPF value. If this ever happens, then we will
2871 * need to remap the internal value to the BPF value before calling
2872 * tcp_call_bpf_2arg.
2873 */
2874 BUILD_BUG_ON((int)BPF_TCP_ESTABLISHED != (int)TCP_ESTABLISHED);
2875 BUILD_BUG_ON((int)BPF_TCP_SYN_SENT != (int)TCP_SYN_SENT);
2876 BUILD_BUG_ON((int)BPF_TCP_SYN_RECV != (int)TCP_SYN_RECV);
2877 BUILD_BUG_ON((int)BPF_TCP_FIN_WAIT1 != (int)TCP_FIN_WAIT1);
2878 BUILD_BUG_ON((int)BPF_TCP_FIN_WAIT2 != (int)TCP_FIN_WAIT2);
2879 BUILD_BUG_ON((int)BPF_TCP_TIME_WAIT != (int)TCP_TIME_WAIT);
2880 BUILD_BUG_ON((int)BPF_TCP_CLOSE != (int)TCP_CLOSE);
2881 BUILD_BUG_ON((int)BPF_TCP_CLOSE_WAIT != (int)TCP_CLOSE_WAIT);
2882 BUILD_BUG_ON((int)BPF_TCP_LAST_ACK != (int)TCP_LAST_ACK);
2883 BUILD_BUG_ON((int)BPF_TCP_LISTEN != (int)TCP_LISTEN);
2884 BUILD_BUG_ON((int)BPF_TCP_CLOSING != (int)TCP_CLOSING);
2885 BUILD_BUG_ON((int)BPF_TCP_NEW_SYN_RECV != (int)TCP_NEW_SYN_RECV);
2886 BUILD_BUG_ON((int)BPF_TCP_BOUND_INACTIVE != (int)TCP_BOUND_INACTIVE);
2887 BUILD_BUG_ON((int)BPF_TCP_MAX_STATES != (int)TCP_MAX_STATES);
2888
2889 /* bpf uapi header bpf.h defines an anonymous enum with values
2890 * BPF_TCP_* used by bpf programs. Currently gcc built vmlinux
2891 * is able to emit this enum in DWARF due to the above BUILD_BUG_ON.
2892 * But clang built vmlinux does not have this enum in DWARF
2893 * since clang removes the above code before generating IR/debuginfo.
2894	 * Let us explicitly emit the type debuginfo to ensure the
2895	 * above-mentioned anonymous enum ends up in the vmlinux DWARF, and
2896	 * hence in BTF, regardless of which compiler is used.
2897 */
2898 BTF_TYPE_EMIT_ENUM(BPF_TCP_ESTABLISHED);
2899
2900 if (BPF_SOCK_OPS_TEST_FLAG(tcp_sk(sk), BPF_SOCK_OPS_STATE_CB_FLAG))
2901 tcp_call_bpf_2arg(sk, BPF_SOCK_OPS_STATE_CB, oldstate, state);
2902
2903 switch (state) {
2904 case TCP_ESTABLISHED:
2905 if (oldstate != TCP_ESTABLISHED)
2906 TCP_INC_STATS(sock_net(sk), TCP_MIB_CURRESTAB);
2907 break;
2908 case TCP_CLOSE_WAIT:
2909 if (oldstate == TCP_SYN_RECV)
2910 TCP_INC_STATS(sock_net(sk), TCP_MIB_CURRESTAB);
2911 break;
2912
2913 case TCP_CLOSE:
2914 if (oldstate == TCP_CLOSE_WAIT || oldstate == TCP_ESTABLISHED)
2915 TCP_INC_STATS(sock_net(sk), TCP_MIB_ESTABRESETS);
2916
2917 sk->sk_prot->unhash(sk);
2918 if (inet_csk(sk)->icsk_bind_hash &&
2919 !(sk->sk_userlocks & SOCK_BINDPORT_LOCK))
2920 inet_put_port(sk);
2921 fallthrough;
2922 default:
2923 if (oldstate == TCP_ESTABLISHED || oldstate == TCP_CLOSE_WAIT)
2924 TCP_DEC_STATS(sock_net(sk), TCP_MIB_CURRESTAB);
2925 }
2926
2927 /* Change state AFTER socket is unhashed to avoid closed
2928 * socket sitting in hash tables.
2929 */
2930 inet_sk_state_store(sk, state);
2931}
2932EXPORT_SYMBOL_GPL(tcp_set_state);
2933
2934/*
2935 * State processing on a close. This implements the state shift for
2936 * sending our FIN frame. Note that we only send a FIN for some
2937 * states. A shutdown() may have already sent the FIN, or we may be
2938 * closed.
2939 */
2940
2941static const unsigned char new_state[16] = {
2942 /* current state: new state: action: */
2943 [0 /* (Invalid) */] = TCP_CLOSE,
2944 [TCP_ESTABLISHED] = TCP_FIN_WAIT1 | TCP_ACTION_FIN,
2945 [TCP_SYN_SENT] = TCP_CLOSE,
2946 [TCP_SYN_RECV] = TCP_FIN_WAIT1 | TCP_ACTION_FIN,
2947 [TCP_FIN_WAIT1] = TCP_FIN_WAIT1,
2948 [TCP_FIN_WAIT2] = TCP_FIN_WAIT2,
2949 [TCP_TIME_WAIT] = TCP_CLOSE,
2950 [TCP_CLOSE] = TCP_CLOSE,
2951 [TCP_CLOSE_WAIT] = TCP_LAST_ACK | TCP_ACTION_FIN,
2952 [TCP_LAST_ACK] = TCP_LAST_ACK,
2953 [TCP_LISTEN] = TCP_CLOSE,
2954 [TCP_CLOSING] = TCP_CLOSING,
2955 [TCP_NEW_SYN_RECV] = TCP_CLOSE, /* should not happen ! */
2956};
2957
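/* Move the socket to the close-path state given by new_state[] and tell the
 * caller, via the TCP_ACTION_FIN bit, whether a FIN still needs to be sent.
 */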
2958static int tcp_close_state(struct sock *sk)
2959{
2960 int next = (int)new_state[sk->sk_state];
2961 int ns = next & TCP_STATE_MASK;
2962
2963 tcp_set_state(sk, ns);
2964
2965 return next & TCP_ACTION_FIN;
2966}
2967
2968/*
2969 *	Shut down the sending side of a connection. Much like close except
2970 *	that we don't shut down the receive side or sock_set_flag(sk, SOCK_DEAD).
2971 */
2972
2973void tcp_shutdown(struct sock *sk, int how)
2974{
2975 /* We need to grab some memory, and put together a FIN,
2976 * and then put it into the queue to be sent.
2977 * Tim MacKenzie(tym@dibbler.cs.monash.edu.au) 4 Dec '92.
2978 */
2979 if (!(how & SEND_SHUTDOWN))
2980 return;
2981
2982 /* If we've already sent a FIN, or it's a closed state, skip this. */
2983 if ((1 << sk->sk_state) &
2984 (TCPF_ESTABLISHED | TCPF_SYN_SENT |
2985 TCPF_CLOSE_WAIT)) {
2986 /* Clear out any half completed packets. FIN if needed. */
2987 if (tcp_close_state(sk))
2988 tcp_send_fin(sk);
2989 }
2990}
2991EXPORT_SYMBOL(tcp_shutdown);
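
/* Userspace sketch (illustration only, not kernel code): a client that
 * half-closes the write side with shutdown(SHUT_WR) after sending its
 * request, then keeps reading until the peer closes. The fd, buffer and
 * consume() names are placeholders.
 *
 *	send(fd, req, req_len, 0);
 *	shutdown(fd, SHUT_WR);		// sends our FIN, keeps RX open
 *	while ((n = read(fd, buf, sizeof(buf))) > 0)
 *		consume(buf, n);
 *	close(fd);
 */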
2992
2993int tcp_orphan_count_sum(void)
2994{
2995 int i, total = 0;
2996
2997 for_each_possible_cpu(i)
2998 total += per_cpu(tcp_orphan_count, i);
2999
3000 return max(total, 0);
3001}
3002
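/* Summing the per-cpu orphan counters on every call can be costly on large
 * machines, so tcp_orphan_cache holds a snapshot that a timer refreshes every
 * TCP_ORPHAN_TIMER_PERIOD (100ms); tcp_too_many_orphans() compares this cached
 * value against sysctl_tcp_max_orphans.
 */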
3003static int tcp_orphan_cache;
3004static struct timer_list tcp_orphan_timer;
3005#define TCP_ORPHAN_TIMER_PERIOD msecs_to_jiffies(100)
3006
3007static void tcp_orphan_update(struct timer_list *unused)
3008{
3009 WRITE_ONCE(tcp_orphan_cache, tcp_orphan_count_sum());
3010 mod_timer(&tcp_orphan_timer, jiffies + TCP_ORPHAN_TIMER_PERIOD);
3011}
3012
3013static bool tcp_too_many_orphans(int shift)
3014{
3015 return READ_ONCE(tcp_orphan_cache) << shift >
3016 READ_ONCE(sysctl_tcp_max_orphans);
3017}
3018
3019static bool tcp_out_of_memory(const struct sock *sk)
3020{
3021 if (sk->sk_wmem_queued > SOCK_MIN_SNDBUF &&
3022 sk_memory_allocated(sk) > sk_prot_mem_limits(sk, 2))
3023 return true;
3024 return false;
3025}
3026
3027bool tcp_check_oom(const struct sock *sk, int shift)
3028{
3029 bool too_many_orphans, out_of_socket_memory;
3030
3031 too_many_orphans = tcp_too_many_orphans(shift);
3032 out_of_socket_memory = tcp_out_of_memory(sk);
3033
3034 if (too_many_orphans)
3035 net_info_ratelimited("too many orphaned sockets\n");
3036 if (out_of_socket_memory)
3037 net_info_ratelimited("out of memory -- consider tuning tcp_mem\n");
3038 return too_many_orphans || out_of_socket_memory;
3039}
3040
3041void __tcp_close(struct sock *sk, long timeout)
3042{
3043 struct sk_buff *skb;
3044 int data_was_unread = 0;
3045 int state;
3046
3047 WRITE_ONCE(sk->sk_shutdown, SHUTDOWN_MASK);
3048
3049 if (sk->sk_state == TCP_LISTEN) {
3050 tcp_set_state(sk, TCP_CLOSE);
3051
3052 /* Special case. */
3053 inet_csk_listen_stop(sk);
3054
3055 goto adjudge_to_death;
3056 }
3057
3058	/* We need to flush the receive buffers. We do this only on the
3059 * descriptor close, not protocol-sourced closes, because the
3060 * reader process may not have drained the data yet!
3061 */
3062 while ((skb = __skb_dequeue(&sk->sk_receive_queue)) != NULL) {
3063 u32 len = TCP_SKB_CB(skb)->end_seq - TCP_SKB_CB(skb)->seq;
3064
3065 if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN)
3066 len--;
3067 data_was_unread += len;
3068 __kfree_skb(skb);
3069 }
3070
3071 /* If socket has been already reset (e.g. in tcp_reset()) - kill it. */
3072 if (sk->sk_state == TCP_CLOSE)
3073 goto adjudge_to_death;
3074
3075 /* As outlined in RFC 2525, section 2.17, we send a RST here because
3076 * data was lost. To witness the awful effects of the old behavior of
3077 * always doing a FIN, run an older 2.1.x kernel or 2.0.x, start a bulk
3078 * GET in an FTP client, suspend the process, wait for the client to
3079 * advertise a zero window, then kill -9 the FTP client, wheee...
3080 * Note: timeout is always zero in such a case.
3081 */
3082 if (unlikely(tcp_sk(sk)->repair)) {
3083 sk->sk_prot->disconnect(sk, 0);
3084 } else if (data_was_unread) {
3085 /* Unread data was tossed, zap the connection. */
3086 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTONCLOSE);
3087 tcp_set_state(sk, TCP_CLOSE);
3088 tcp_send_active_reset(sk, sk->sk_allocation,
3089 SK_RST_REASON_TCP_ABORT_ON_CLOSE);
3090 } else if (sock_flag(sk, SOCK_LINGER) && !sk->sk_lingertime) {
3091 /* Check zero linger _after_ checking for unread data. */
3092 sk->sk_prot->disconnect(sk, 0);
3093 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTONDATA);
3094 } else if (tcp_close_state(sk)) {
3095 /* We FIN if the application ate all the data before
3096 * zapping the connection.
3097 */
3098
3099 /* RED-PEN. Formally speaking, we have broken TCP state
3100 * machine. State transitions:
3101 *
3102 * TCP_ESTABLISHED -> TCP_FIN_WAIT1
3103 * TCP_SYN_RECV -> TCP_FIN_WAIT1 (it is difficult)
3104 * TCP_CLOSE_WAIT -> TCP_LAST_ACK
3105 *
3106 * are legal only when FIN has been sent (i.e. in window),
3107		 * rather than queued out of window. Purists may object.
3108		 *
3109		 * E.g. the "RFC state" is ESTABLISHED
3110		 * if the Linux state is FIN-WAIT-1 but the FIN has not been sent yet.
3111		 *
3112		 * The visible deviations are that sometimes we enter the
3113		 * time-wait state when it is not really required (harmless),
3114		 * and we do not send active resets when they are required by
3115		 * the specs (TCP_ESTABLISHED and TCP_CLOSE_WAIT, when they look
3116		 * like CLOSING or LAST_ACK to Linux).
3117		 * I probably missed a few more corner cases.
3118 * --ANK
3119 * XXX (TFO) - To start off we don't support SYN+ACK+FIN
3120 * in a single packet! (May consider it later but will
3121 * probably need API support or TCP_CORK SYN-ACK until
3122 * data is written and socket is closed.)
3123 */
3124 tcp_send_fin(sk);
3125 }
3126
3127 sk_stream_wait_close(sk, timeout);
3128
3129adjudge_to_death:
3130 state = sk->sk_state;
3131 sock_hold(sk);
3132 sock_orphan(sk);
3133
3134 local_bh_disable();
3135 bh_lock_sock(sk);
3136 /* remove backlog if any, without releasing ownership. */
3137 __release_sock(sk);
3138
3139 this_cpu_inc(tcp_orphan_count);
3140
3141 /* Have we already been destroyed by a softirq or backlog? */
3142 if (state != TCP_CLOSE && sk->sk_state == TCP_CLOSE)
3143 goto out;
3144
3145	/* This is a (useful) BSD violation of the RFC. There is a
3146	 * problem with TCP as specified, in that the other end could
3147	 * keep a socket open forever with no application left at this end.
3148	 * We use a 1 minute timeout (about the same as BSD) and then kill
3149	 * our end. If they send after that then tough - BUT: long enough
3150	 * that we won't repeat the old "4*rto = almost no time - whoops,
3151	 * reset" mistake.
3152	 *
3153	 * Nope, it was not a mistake. It is really the desired behaviour,
3154	 * e.g. on HTTP servers, where such sockets are useless but
3155	 * consume significant resources. Let's do it with the special
3156	 * linger2 option. --ANK
3157 */
3158
3159 if (sk->sk_state == TCP_FIN_WAIT2) {
3160 struct tcp_sock *tp = tcp_sk(sk);
3161 if (READ_ONCE(tp->linger2) < 0) {
3162 tcp_set_state(sk, TCP_CLOSE);
3163 tcp_send_active_reset(sk, GFP_ATOMIC,
3164 SK_RST_REASON_TCP_ABORT_ON_LINGER);
3165 __NET_INC_STATS(sock_net(sk),
3166 LINUX_MIB_TCPABORTONLINGER);
3167 } else {
3168 const int tmo = tcp_fin_time(sk);
3169
3170 if (tmo > TCP_TIMEWAIT_LEN) {
3171 inet_csk_reset_keepalive_timer(sk,
3172 tmo - TCP_TIMEWAIT_LEN);
3173 } else {
3174 tcp_time_wait(sk, TCP_FIN_WAIT2, tmo);
3175 goto out;
3176 }
3177 }
3178 }
3179 if (sk->sk_state != TCP_CLOSE) {
3180 if (tcp_check_oom(sk, 0)) {
3181 tcp_set_state(sk, TCP_CLOSE);
3182 tcp_send_active_reset(sk, GFP_ATOMIC,
3183 SK_RST_REASON_TCP_ABORT_ON_MEMORY);
3184 __NET_INC_STATS(sock_net(sk),
3185 LINUX_MIB_TCPABORTONMEMORY);
3186 } else if (!check_net(sock_net(sk))) {
3187 /* Not possible to send reset; just close */
3188 tcp_set_state(sk, TCP_CLOSE);
3189 }
3190 }
3191
3192 if (sk->sk_state == TCP_CLOSE) {
3193 struct request_sock *req;
3194
3195 req = rcu_dereference_protected(tcp_sk(sk)->fastopen_rsk,
3196 lockdep_sock_is_held(sk));
3197 /* We could get here with a non-NULL req if the socket is
3198 * aborted (e.g., closed with unread data) before 3WHS
3199 * finishes.
3200 */
3201 if (req)
3202 reqsk_fastopen_remove(sk, req, false);
3203 inet_csk_destroy_sock(sk);
3204 }
3205 /* Otherwise, socket is reprieved until protocol close. */
3206
3207out:
3208 bh_unlock_sock(sk);
3209 local_bh_enable();
3210}
3211
3212void tcp_close(struct sock *sk, long timeout)
3213{
3214 lock_sock(sk);
3215 __tcp_close(sk, timeout);
3216 release_sock(sk);
3217 if (!sk->sk_net_refcnt)
3218 inet_csk_clear_xmit_timers_sync(sk);
3219 sock_put(sk);
3220}
3221EXPORT_SYMBOL(tcp_close);
3222
3223/* These states need RST on ABORT according to RFC793 */
3224
3225static inline bool tcp_need_reset(int state)
3226{
3227 return (1 << state) &
3228 (TCPF_ESTABLISHED | TCPF_CLOSE_WAIT | TCPF_FIN_WAIT1 |
3229 TCPF_FIN_WAIT2 | TCPF_SYN_RECV);
3230}
3231
3232static void tcp_rtx_queue_purge(struct sock *sk)
3233{
3234 struct rb_node *p = rb_first(&sk->tcp_rtx_queue);
3235
3236 tcp_sk(sk)->highest_sack = NULL;
3237 while (p) {
3238 struct sk_buff *skb = rb_to_skb(p);
3239
3240 p = rb_next(p);
3241		/* Since we are deleting the whole queue, there is no need to
3242 * list_del(&skb->tcp_tsorted_anchor)
3243 */
3244 tcp_rtx_queue_unlink(skb, sk);
3245 tcp_wmem_free_skb(sk, skb);
3246 }
3247}
3248
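/* Drop everything queued for transmission: the sk_write_queue, the retransmit
 * rb-tree, all retransmission hints and the packets_out/backoff accounting.
 * Used e.g. by tcp_disconnect() below.
 */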
3249void tcp_write_queue_purge(struct sock *sk)
3250{
3251 struct sk_buff *skb;
3252
3253 tcp_chrono_stop(sk, TCP_CHRONO_BUSY);
3254 while ((skb = __skb_dequeue(&sk->sk_write_queue)) != NULL) {
3255 tcp_skb_tsorted_anchor_cleanup(skb);
3256 tcp_wmem_free_skb(sk, skb);
3257 }
3258 tcp_rtx_queue_purge(sk);
3259 INIT_LIST_HEAD(&tcp_sk(sk)->tsorted_sent_queue);
3260 tcp_clear_all_retrans_hints(tcp_sk(sk));
3261 tcp_sk(sk)->packets_out = 0;
3262 inet_csk(sk)->icsk_backoff = 0;
3263}
3264
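/* Abort the connection (the ABORT function of RFC 793): send a RST when the
 * old state requires one, purge every queue and timer, and reset the
 * tcp_sock/inet_connection_sock fields so that the socket can be reused by a
 * subsequent connect().
 */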
3265int tcp_disconnect(struct sock *sk, int flags)
3266{
3267 struct inet_sock *inet = inet_sk(sk);
3268 struct inet_connection_sock *icsk = inet_csk(sk);
3269 struct tcp_sock *tp = tcp_sk(sk);
3270 int old_state = sk->sk_state;
3271 u32 seq;
3272
3273 if (old_state != TCP_CLOSE)
3274 tcp_set_state(sk, TCP_CLOSE);
3275
3276 /* ABORT function of RFC793 */
3277 if (old_state == TCP_LISTEN) {
3278 inet_csk_listen_stop(sk);
3279 } else if (unlikely(tp->repair)) {
3280 WRITE_ONCE(sk->sk_err, ECONNABORTED);
3281 } else if (tcp_need_reset(old_state)) {
3282 tcp_send_active_reset(sk, gfp_any(), SK_RST_REASON_TCP_STATE);
3283 WRITE_ONCE(sk->sk_err, ECONNRESET);
3284 } else if (tp->snd_nxt != tp->write_seq &&
3285 (1 << old_state) & (TCPF_CLOSING | TCPF_LAST_ACK)) {
3286		/* The last check adjusts for the discrepancy between the Linux
3287		 * and the RFC states.
3288 */
3289 tcp_send_active_reset(sk, gfp_any(),
3290 SK_RST_REASON_TCP_DISCONNECT_WITH_DATA);
3291 WRITE_ONCE(sk->sk_err, ECONNRESET);
3292 } else if (old_state == TCP_SYN_SENT)
3293 WRITE_ONCE(sk->sk_err, ECONNRESET);
3294
3295 tcp_clear_xmit_timers(sk);
3296 __skb_queue_purge(&sk->sk_receive_queue);
3297 WRITE_ONCE(tp->copied_seq, tp->rcv_nxt);
3298 WRITE_ONCE(tp->urg_data, 0);
3299 sk_set_peek_off(sk, -1);
3300 tcp_write_queue_purge(sk);
3301 tcp_fastopen_active_disable_ofo_check(sk);
3302 skb_rbtree_purge(&tp->out_of_order_queue);
3303
3304 inet->inet_dport = 0;
3305
3306 inet_bhash2_reset_saddr(sk);
3307
3308 WRITE_ONCE(sk->sk_shutdown, 0);
3309 sock_reset_flag(sk, SOCK_DONE);
3310 tp->srtt_us = 0;
3311 tp->mdev_us = jiffies_to_usecs(TCP_TIMEOUT_INIT);
3312 tp->rcv_rtt_last_tsecr = 0;
3313
3314 seq = tp->write_seq + tp->max_window + 2;
3315 if (!seq)
3316 seq = 1;
3317 WRITE_ONCE(tp->write_seq, seq);
3318
3319 icsk->icsk_backoff = 0;
3320 icsk->icsk_probes_out = 0;
3321 icsk->icsk_probes_tstamp = 0;
3322 icsk->icsk_rto = TCP_TIMEOUT_INIT;
3323 icsk->icsk_rto_min = TCP_RTO_MIN;
3324 icsk->icsk_delack_max = TCP_DELACK_MAX;
3325 tp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
3326 tcp_snd_cwnd_set(tp, TCP_INIT_CWND);
3327 tp->snd_cwnd_cnt = 0;
3328 tp->is_cwnd_limited = 0;
3329 tp->max_packets_out = 0;
3330 tp->window_clamp = 0;
3331 tp->delivered = 0;
3332 tp->delivered_ce = 0;
3333 if (icsk->icsk_ca_initialized && icsk->icsk_ca_ops->release)
3334 icsk->icsk_ca_ops->release(sk);
3335 memset(icsk->icsk_ca_priv, 0, sizeof(icsk->icsk_ca_priv));
3336 icsk->icsk_ca_initialized = 0;
3337 tcp_set_ca_state(sk, TCP_CA_Open);
3338 tp->is_sack_reneg = 0;
3339 tcp_clear_retrans(tp);
3340 tp->total_retrans = 0;
3341 inet_csk_delack_init(sk);
3342 /* Initialize rcv_mss to TCP_MIN_MSS to avoid division by 0
3343 * issue in __tcp_select_window()
3344 */
3345 icsk->icsk_ack.rcv_mss = TCP_MIN_MSS;
3346 memset(&tp->rx_opt, 0, sizeof(tp->rx_opt));
3347 __sk_dst_reset(sk);
3348 dst_release(unrcu_pointer(xchg(&sk->sk_rx_dst, NULL)));
3349 tcp_saved_syn_free(tp);
3350 tp->compressed_ack = 0;
3351 tp->segs_in = 0;
3352 tp->segs_out = 0;
3353 tp->bytes_sent = 0;
3354 tp->bytes_acked = 0;
3355 tp->bytes_received = 0;
3356 tp->bytes_retrans = 0;
3357 tp->data_segs_in = 0;
3358 tp->data_segs_out = 0;
3359 tp->duplicate_sack[0].start_seq = 0;
3360 tp->duplicate_sack[0].end_seq = 0;
3361 tp->dsack_dups = 0;
3362 tp->reord_seen = 0;
3363 tp->retrans_out = 0;
3364 tp->sacked_out = 0;
3365 tp->tlp_high_seq = 0;
3366 tp->last_oow_ack_time = 0;
3367 tp->plb_rehash = 0;
3368 /* There's a bubble in the pipe until at least the first ACK. */
3369 tp->app_limited = ~0U;
3370 tp->rate_app_limited = 1;
3371 tp->rack.mstamp = 0;
3372 tp->rack.advanced = 0;
3373 tp->rack.reo_wnd_steps = 1;
3374 tp->rack.last_delivered = 0;
3375 tp->rack.reo_wnd_persist = 0;
3376 tp->rack.dsack_seen = 0;
3377 tp->syn_data_acked = 0;
3378 tp->rx_opt.saw_tstamp = 0;
3379 tp->rx_opt.dsack = 0;
3380 tp->rx_opt.num_sacks = 0;
3381 tp->rcv_ooopack = 0;
3382
3383
3384 /* Clean up fastopen related fields */
3385 tcp_free_fastopen_req(tp);
3386 inet_clear_bit(DEFER_CONNECT, sk);
3387 tp->fastopen_client_fail = 0;
3388
3389 WARN_ON(inet->inet_num && !icsk->icsk_bind_hash);
3390
3391 if (sk->sk_frag.page) {
3392 put_page(sk->sk_frag.page);
3393 sk->sk_frag.page = NULL;
3394 sk->sk_frag.offset = 0;
3395 }
3396 sk_error_report(sk);
3397 return 0;
3398}
3399EXPORT_SYMBOL(tcp_disconnect);
3400
3401static inline bool tcp_can_repair_sock(const struct sock *sk)
3402{
3403 return sockopt_ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN) &&
3404 (sk->sk_state != TCP_LISTEN);
3405}
3406
3407static int tcp_repair_set_window(struct tcp_sock *tp, sockptr_t optbuf, int len)
3408{
3409 struct tcp_repair_window opt;
3410
3411 if (!tp->repair)
3412 return -EPERM;
3413
3414 if (len != sizeof(opt))
3415 return -EINVAL;
3416
3417 if (copy_from_sockptr(&opt, optbuf, sizeof(opt)))
3418 return -EFAULT;
3419
3420 if (opt.max_window < opt.snd_wnd)
3421 return -EINVAL;
3422
3423 if (after(opt.snd_wl1, tp->rcv_nxt + opt.rcv_wnd))
3424 return -EINVAL;
3425
3426 if (after(opt.rcv_wup, tp->rcv_nxt))
3427 return -EINVAL;
3428
3429 tp->snd_wl1 = opt.snd_wl1;
3430 tp->snd_wnd = opt.snd_wnd;
3431 tp->max_window = opt.max_window;
3432
3433 tp->rcv_wnd = opt.rcv_wnd;
3434 tp->rcv_wup = opt.rcv_wup;
3435
3436 return 0;
3437}
3438
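/* TCP_REPAIR_OPTIONS: replay the peer-negotiated options recorded by a
 * checkpoint/restore tool. The buffer is an array of struct tcp_repair_opt
 * entries; only MSS, window scaling, SACK-permitted and timestamps are
 * understood.
 */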
3439static int tcp_repair_options_est(struct sock *sk, sockptr_t optbuf,
3440 unsigned int len)
3441{
3442 struct tcp_sock *tp = tcp_sk(sk);
3443 struct tcp_repair_opt opt;
3444 size_t offset = 0;
3445
3446 while (len >= sizeof(opt)) {
3447 if (copy_from_sockptr_offset(&opt, optbuf, offset, sizeof(opt)))
3448 return -EFAULT;
3449
3450 offset += sizeof(opt);
3451 len -= sizeof(opt);
3452
3453 switch (opt.opt_code) {
3454 case TCPOPT_MSS:
3455 tp->rx_opt.mss_clamp = opt.opt_val;
3456 tcp_mtup_init(sk);
3457 break;
3458 case TCPOPT_WINDOW:
3459 {
3460 u16 snd_wscale = opt.opt_val & 0xFFFF;
3461 u16 rcv_wscale = opt.opt_val >> 16;
3462
3463 if (snd_wscale > TCP_MAX_WSCALE || rcv_wscale > TCP_MAX_WSCALE)
3464 return -EFBIG;
3465
3466 tp->rx_opt.snd_wscale = snd_wscale;
3467 tp->rx_opt.rcv_wscale = rcv_wscale;
3468 tp->rx_opt.wscale_ok = 1;
3469 }
3470 break;
3471 case TCPOPT_SACK_PERM:
3472 if (opt.opt_val != 0)
3473 return -EINVAL;
3474
3475 tp->rx_opt.sack_ok |= TCP_SACK_SEEN;
3476 break;
3477 case TCPOPT_TIMESTAMP:
3478 if (opt.opt_val != 0)
3479 return -EINVAL;
3480
3481 tp->rx_opt.tstamp_ok = 1;
3482 break;
3483 }
3484 }
3485
3486 return 0;
3487}
3488
3489DEFINE_STATIC_KEY_FALSE(tcp_tx_delay_enabled);
3490EXPORT_SYMBOL(tcp_tx_delay_enabled);
3491
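/* Flip the tcp_tx_delay static key exactly once, the first time any socket
 * sets TCP_TX_DELAY; the cmpxchg() guarantees a single enable and a single
 * "TCP_TX_DELAY enabled" log line even with concurrent callers.
 */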
3492static void tcp_enable_tx_delay(void)
3493{
3494 if (!static_branch_unlikely(&tcp_tx_delay_enabled)) {
3495 static int __tcp_tx_delay_enabled = 0;
3496
3497 if (cmpxchg(&__tcp_tx_delay_enabled, 0, 1) == 0) {
3498 static_branch_enable(&tcp_tx_delay_enabled);
3499 pr_info("TCP_TX_DELAY enabled\n");
3500 }
3501 }
3502}
3503
3504 /* When set, indicates that we always queue non-full frames. Later the user clears
3505 * this option and we transmit any pending partial frames in the queue. This is
3506 * meant to be used alongside sendfile() to get properly filled frames when the
3507 * user (for example) must write out headers with a write() call first and then
3508 * use sendfile to send out the data parts.
3509 *
3510 * TCP_CORK can be set together with TCP_NODELAY and it is stronger than
3511 * TCP_NODELAY.
3512 */
3513void __tcp_sock_set_cork(struct sock *sk, bool on)
3514{
3515 struct tcp_sock *tp = tcp_sk(sk);
3516
3517 if (on) {
3518 tp->nonagle |= TCP_NAGLE_CORK;
3519 } else {
3520 tp->nonagle &= ~TCP_NAGLE_CORK;
3521 if (tp->nonagle & TCP_NAGLE_OFF)
3522 tp->nonagle |= TCP_NAGLE_PUSH;
3523 tcp_push_pending_frames(sk);
3524 }
3525}
3526
3527void tcp_sock_set_cork(struct sock *sk, bool on)
3528{
3529 lock_sock(sk);
3530 __tcp_sock_set_cork(sk, on);
3531 release_sock(sk);
3532}
3533EXPORT_SYMBOL(tcp_sock_set_cork);
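
/* Userspace sketch (illustration only, not kernel code): the classic
 * header-plus-sendfile() pattern described in the comment above. The fd and
 * length names are placeholders.
 *
 *	int on = 1, off = 0;
 *
 *	setsockopt(fd, IPPROTO_TCP, TCP_CORK, &on, sizeof(on));
 *	write(fd, hdr, hdr_len);			// queued, not yet sent
 *	sendfile(fd, file_fd, NULL, file_len);		// fills full frames
 *	setsockopt(fd, IPPROTO_TCP, TCP_CORK, &off, sizeof(off));
 *							// flushes the tail
 */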
3534
3535 /* TCP_NODELAY is weaker than TCP_CORK, so setting this option on a corked
3536 * socket is remembered, but it is not activated until the cork is cleared.
3537 *
3538 * However, when TCP_NODELAY is set we make an explicit push, which overrides
3539 * even TCP_CORK for currently queued segments.
3540 */
3541void __tcp_sock_set_nodelay(struct sock *sk, bool on)
3542{
3543 if (on) {
3544 tcp_sk(sk)->nonagle |= TCP_NAGLE_OFF|TCP_NAGLE_PUSH;
3545 tcp_push_pending_frames(sk);
3546 } else {
3547 tcp_sk(sk)->nonagle &= ~TCP_NAGLE_OFF;
3548 }
3549}
3550
3551void tcp_sock_set_nodelay(struct sock *sk)
3552{
3553 lock_sock(sk);
3554 __tcp_sock_set_nodelay(sk, true);
3555 release_sock(sk);
3556}
3557EXPORT_SYMBOL(tcp_sock_set_nodelay);
3558
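/* TCP_QUICKACK: a zero value re-enters pingpong (delayed-ACK) mode; a non-zero
 * value leaves it and, if an ACK is already scheduled on an established or
 * close-wait connection, pushes that ACK out immediately via
 * tcp_cleanup_rbuf(). An even non-zero value re-enters pingpong mode after
 * that forced ACK, making the quick ACK a one-shot.
 */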
3559static void __tcp_sock_set_quickack(struct sock *sk, int val)
3560{
3561 if (!val) {
3562 inet_csk_enter_pingpong_mode(sk);
3563 return;
3564 }
3565
3566 inet_csk_exit_pingpong_mode(sk);
3567 if ((1 << sk->sk_state) & (TCPF_ESTABLISHED | TCPF_CLOSE_WAIT) &&
3568 inet_csk_ack_scheduled(sk)) {
3569 inet_csk(sk)->icsk_ack.pending |= ICSK_ACK_PUSHED;
3570 tcp_cleanup_rbuf(sk, 1);
3571 if (!(val & 1))
3572 inet_csk_enter_pingpong_mode(sk);
3573 }
3574}
3575
3576void tcp_sock_set_quickack(struct sock *sk, int val)
3577{
3578 lock_sock(sk);
3579 __tcp_sock_set_quickack(sk, val);
3580 release_sock(sk);
3581}
3582EXPORT_SYMBOL(tcp_sock_set_quickack);
3583
3584int tcp_sock_set_syncnt(struct sock *sk, int val)
3585{
3586 if (val < 1 || val > MAX_TCP_SYNCNT)
3587 return -EINVAL;
3588
3589 WRITE_ONCE(inet_csk(sk)->icsk_syn_retries, val);
3590 return 0;
3591}
3592EXPORT_SYMBOL(tcp_sock_set_syncnt);
3593
3594int tcp_sock_set_user_timeout(struct sock *sk, int val)
3595{
3596 /* Cap the max time in ms TCP will retry or probe the window
3597 * before giving up and aborting (ETIMEDOUT) a connection.
3598 */
3599 if (val < 0)
3600 return -EINVAL;
3601
3602 WRITE_ONCE(inet_csk(sk)->icsk_user_timeout, val);
3603 return 0;
3604}
3605EXPORT_SYMBOL(tcp_sock_set_user_timeout);
3606
3607int tcp_sock_set_keepidle_locked(struct sock *sk, int val)
3608{
3609 struct tcp_sock *tp = tcp_sk(sk);
3610
3611 if (val < 1 || val > MAX_TCP_KEEPIDLE)
3612 return -EINVAL;
3613
3614 /* Paired with WRITE_ONCE() in keepalive_time_when() */
3615 WRITE_ONCE(tp->keepalive_time, val * HZ);
3616 if (sock_flag(sk, SOCK_KEEPOPEN) &&
3617 !((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))) {
3618 u32 elapsed = keepalive_time_elapsed(tp);
3619
3620 if (tp->keepalive_time > elapsed)
3621 elapsed = tp->keepalive_time - elapsed;
3622 else
3623 elapsed = 0;
3624 inet_csk_reset_keepalive_timer(sk, elapsed);
3625 }
3626
3627 return 0;
3628}
3629
3630int tcp_sock_set_keepidle(struct sock *sk, int val)
3631{
3632 int err;
3633
3634 lock_sock(sk);
3635 err = tcp_sock_set_keepidle_locked(sk, val);
3636 release_sock(sk);
3637 return err;
3638}
3639EXPORT_SYMBOL(tcp_sock_set_keepidle);
3640
3641int tcp_sock_set_keepintvl(struct sock *sk, int val)
3642{
3643 if (val < 1 || val > MAX_TCP_KEEPINTVL)
3644 return -EINVAL;
3645
3646 WRITE_ONCE(tcp_sk(sk)->keepalive_intvl, val * HZ);
3647 return 0;
3648}
3649EXPORT_SYMBOL(tcp_sock_set_keepintvl);
3650
3651int tcp_sock_set_keepcnt(struct sock *sk, int val)
3652{
3653 if (val < 1 || val > MAX_TCP_KEEPCNT)
3654 return -EINVAL;
3655
3656 /* Paired with READ_ONCE() in keepalive_probes() */
3657 WRITE_ONCE(tcp_sk(sk)->keepalive_probes, val);
3658 return 0;
3659}
3660EXPORT_SYMBOL(tcp_sock_set_keepcnt);
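
/* Userspace sketch (illustration only, not kernel code): turning on keepalive
 * and tuning the three knobs handled above. The values are arbitrary examples.
 *
 *	int on = 1, idle = 60, intvl = 10, cnt = 5;
 *
 *	setsockopt(fd, SOL_SOCKET, SO_KEEPALIVE, &on, sizeof(on));
 *	setsockopt(fd, IPPROTO_TCP, TCP_KEEPIDLE, &idle, sizeof(idle));
 *	setsockopt(fd, IPPROTO_TCP, TCP_KEEPINTVL, &intvl, sizeof(intvl));
 *	setsockopt(fd, IPPROTO_TCP, TCP_KEEPCNT, &cnt, sizeof(cnt));
 */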
3661
3662int tcp_set_window_clamp(struct sock *sk, int val)
3663{
3664 struct tcp_sock *tp = tcp_sk(sk);
3665
3666 if (!val) {
3667 if (sk->sk_state != TCP_CLOSE)
3668 return -EINVAL;
3669 WRITE_ONCE(tp->window_clamp, 0);
3670 } else {
3671 u32 new_rcv_ssthresh, old_window_clamp = tp->window_clamp;
3672 u32 new_window_clamp = val < SOCK_MIN_RCVBUF / 2 ?
3673 SOCK_MIN_RCVBUF / 2 : val;
3674
3675 if (new_window_clamp == old_window_clamp)
3676 return 0;
3677
3678 WRITE_ONCE(tp->window_clamp, new_window_clamp);
3679 if (new_window_clamp < old_window_clamp) {
3680 /* need to apply the reserved mem provisioning only
3681 * when shrinking the window clamp
3682 */
3683 __tcp_adjust_rcv_ssthresh(sk, tp->window_clamp);
3684
3685 } else {
3686 new_rcv_ssthresh = min(tp->rcv_wnd, tp->window_clamp);
3687 tp->rcv_ssthresh = max(new_rcv_ssthresh,
3688 tp->rcv_ssthresh);
3689 }
3690 }
3691 return 0;
3692}
3693
3694/*
3695 * Socket option code for TCP.
3696 */
3697int do_tcp_setsockopt(struct sock *sk, int level, int optname,
3698 sockptr_t optval, unsigned int optlen)
3699{
3700 struct tcp_sock *tp = tcp_sk(sk);
3701 struct inet_connection_sock *icsk = inet_csk(sk);
3702 struct net *net = sock_net(sk);
3703 int val;
3704 int err = 0;
3705
3706 /* These are data/string values, all the others are ints */
3707 switch (optname) {
3708 case TCP_CONGESTION: {
3709 char name[TCP_CA_NAME_MAX];
3710
3711 if (optlen < 1)
3712 return -EINVAL;
3713
3714 val = strncpy_from_sockptr(name, optval,
3715 min_t(long, TCP_CA_NAME_MAX-1, optlen));
3716 if (val < 0)
3717 return -EFAULT;
3718 name[val] = 0;
3719
3720 sockopt_lock_sock(sk);
3721 err = tcp_set_congestion_control(sk, name, !has_current_bpf_ctx(),
3722 sockopt_ns_capable(sock_net(sk)->user_ns,
3723 CAP_NET_ADMIN));
3724 sockopt_release_sock(sk);
3725 return err;
3726 }
3727 case TCP_ULP: {
3728 char name[TCP_ULP_NAME_MAX];
3729
3730 if (optlen < 1)
3731 return -EINVAL;
3732
3733 val = strncpy_from_sockptr(name, optval,
3734 min_t(long, TCP_ULP_NAME_MAX - 1,
3735 optlen));
3736 if (val < 0)
3737 return -EFAULT;
3738 name[val] = 0;
3739
3740 sockopt_lock_sock(sk);
3741 err = tcp_set_ulp(sk, name);
3742 sockopt_release_sock(sk);
3743 return err;
3744 }
3745 case TCP_FASTOPEN_KEY: {
3746 __u8 key[TCP_FASTOPEN_KEY_BUF_LENGTH];
3747 __u8 *backup_key = NULL;
3748
3749		/* Allow a backup key as well to facilitate key rotation.
3750		 * The first key is the active one.
3751 */
3752 if (optlen != TCP_FASTOPEN_KEY_LENGTH &&
3753 optlen != TCP_FASTOPEN_KEY_BUF_LENGTH)
3754 return -EINVAL;
3755
3756 if (copy_from_sockptr(key, optval, optlen))
3757 return -EFAULT;
3758
3759 if (optlen == TCP_FASTOPEN_KEY_BUF_LENGTH)
3760 backup_key = key + TCP_FASTOPEN_KEY_LENGTH;
3761
3762 return tcp_fastopen_reset_cipher(net, sk, key, backup_key);
3763 }
3764 default:
3765 /* fallthru */
3766 break;
3767 }
3768
3769 if (optlen < sizeof(int))
3770 return -EINVAL;
3771
3772 if (copy_from_sockptr(&val, optval, sizeof(val)))
3773 return -EFAULT;
3774
3775 /* Handle options that can be set without locking the socket. */
3776 switch (optname) {
3777 case TCP_SYNCNT:
3778 return tcp_sock_set_syncnt(sk, val);
3779 case TCP_USER_TIMEOUT:
3780 return tcp_sock_set_user_timeout(sk, val);
3781 case TCP_KEEPINTVL:
3782 return tcp_sock_set_keepintvl(sk, val);
3783 case TCP_KEEPCNT:
3784 return tcp_sock_set_keepcnt(sk, val);
3785 case TCP_LINGER2:
3786 if (val < 0)
3787 WRITE_ONCE(tp->linger2, -1);
3788 else if (val > TCP_FIN_TIMEOUT_MAX / HZ)
3789 WRITE_ONCE(tp->linger2, TCP_FIN_TIMEOUT_MAX);
3790 else
3791 WRITE_ONCE(tp->linger2, val * HZ);
3792 return 0;
3793 case TCP_DEFER_ACCEPT:
3794 /* Translate value in seconds to number of retransmits */
3795 WRITE_ONCE(icsk->icsk_accept_queue.rskq_defer_accept,
3796 secs_to_retrans(val, TCP_TIMEOUT_INIT / HZ,
3797 TCP_RTO_MAX / HZ));
3798 return 0;
3799 }
3800
3801 sockopt_lock_sock(sk);
3802
3803 switch (optname) {
3804 case TCP_MAXSEG:
3805		/* Values greater than the interface MTU won't take effect. However,
3806		 * at the point when this call is done we typically don't yet
3807		 * know which interface is going to be used.
3808 */
3809 if (val && (val < TCP_MIN_MSS || val > MAX_TCP_WINDOW)) {
3810 err = -EINVAL;
3811 break;
3812 }
3813 tp->rx_opt.user_mss = val;
3814 break;
3815
3816 case TCP_NODELAY:
3817 __tcp_sock_set_nodelay(sk, val);
3818 break;
3819
3820 case TCP_THIN_LINEAR_TIMEOUTS:
3821 if (val < 0 || val > 1)
3822 err = -EINVAL;
3823 else
3824 tp->thin_lto = val;
3825 break;
3826
3827 case TCP_THIN_DUPACK:
3828 if (val < 0 || val > 1)
3829 err = -EINVAL;
3830 break;
3831
3832 case TCP_REPAIR:
3833 if (!tcp_can_repair_sock(sk))
3834 err = -EPERM;
3835 else if (val == TCP_REPAIR_ON) {
3836 tp->repair = 1;
3837 sk->sk_reuse = SK_FORCE_REUSE;
3838 tp->repair_queue = TCP_NO_QUEUE;
3839 } else if (val == TCP_REPAIR_OFF) {
3840 tp->repair = 0;
3841 sk->sk_reuse = SK_NO_REUSE;
3842 tcp_send_window_probe(sk);
3843 } else if (val == TCP_REPAIR_OFF_NO_WP) {
3844 tp->repair = 0;
3845 sk->sk_reuse = SK_NO_REUSE;
3846 } else
3847 err = -EINVAL;
3848
3849 break;
3850
3851 case TCP_REPAIR_QUEUE:
3852 if (!tp->repair)
3853 err = -EPERM;
3854 else if ((unsigned int)val < TCP_QUEUES_NR)
3855 tp->repair_queue = val;
3856 else
3857 err = -EINVAL;
3858 break;
3859
3860 case TCP_QUEUE_SEQ:
3861 if (sk->sk_state != TCP_CLOSE) {
3862 err = -EPERM;
3863 } else if (tp->repair_queue == TCP_SEND_QUEUE) {
3864 if (!tcp_rtx_queue_empty(sk))
3865 err = -EPERM;
3866 else
3867 WRITE_ONCE(tp->write_seq, val);
3868 } else if (tp->repair_queue == TCP_RECV_QUEUE) {
3869 if (tp->rcv_nxt != tp->copied_seq) {
3870 err = -EPERM;
3871 } else {
3872 WRITE_ONCE(tp->rcv_nxt, val);
3873 WRITE_ONCE(tp->copied_seq, val);
3874 }
3875 } else {
3876 err = -EINVAL;
3877 }
3878 break;
3879
3880 case TCP_REPAIR_OPTIONS:
3881 if (!tp->repair)
3882 err = -EINVAL;
3883 else if (sk->sk_state == TCP_ESTABLISHED && !tp->bytes_sent)
3884 err = tcp_repair_options_est(sk, optval, optlen);
3885 else
3886 err = -EPERM;
3887 break;
3888
3889 case TCP_CORK:
3890 __tcp_sock_set_cork(sk, val);
3891 break;
3892
3893 case TCP_KEEPIDLE:
3894 err = tcp_sock_set_keepidle_locked(sk, val);
3895 break;
3896 case TCP_SAVE_SYN:
3897 /* 0: disable, 1: enable, 2: start from ether_header */
3898 if (val < 0 || val > 2)
3899 err = -EINVAL;
3900 else
3901 tp->save_syn = val;
3902 break;
3903
3904 case TCP_WINDOW_CLAMP:
3905 err = tcp_set_window_clamp(sk, val);
3906 break;
3907
3908 case TCP_QUICKACK:
3909 __tcp_sock_set_quickack(sk, val);
3910 break;
3911
3912 case TCP_AO_REPAIR:
3913 if (!tcp_can_repair_sock(sk)) {
3914 err = -EPERM;
3915 break;
3916 }
3917 err = tcp_ao_set_repair(sk, optval, optlen);
3918 break;
3919#ifdef CONFIG_TCP_AO
3920 case TCP_AO_ADD_KEY:
3921 case TCP_AO_DEL_KEY:
3922 case TCP_AO_INFO: {
3923 /* If this is the first TCP-AO setsockopt() on the socket,
3924 * sk_state has to be LISTEN or CLOSE. Allow TCP_REPAIR
3925 * in any state.
3926 */
3927 if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))
3928 goto ao_parse;
3929 if (rcu_dereference_protected(tcp_sk(sk)->ao_info,
3930 lockdep_sock_is_held(sk)))
3931 goto ao_parse;
3932 if (tp->repair)
3933 goto ao_parse;
3934 err = -EISCONN;
3935 break;
3936ao_parse:
3937 err = tp->af_specific->ao_parse(sk, optname, optval, optlen);
3938 break;
3939 }
3940#endif
3941#ifdef CONFIG_TCP_MD5SIG
3942 case TCP_MD5SIG:
3943 case TCP_MD5SIG_EXT:
3944 err = tp->af_specific->md5_parse(sk, optname, optval, optlen);
3945 break;
3946#endif
3947 case TCP_FASTOPEN:
3948 if (val >= 0 && ((1 << sk->sk_state) & (TCPF_CLOSE |
3949 TCPF_LISTEN))) {
3950 tcp_fastopen_init_key_once(net);
3951
3952 fastopen_queue_tune(sk, val);
3953 } else {
3954 err = -EINVAL;
3955 }
3956 break;
3957 case TCP_FASTOPEN_CONNECT:
3958 if (val > 1 || val < 0) {
3959 err = -EINVAL;
3960 } else if (READ_ONCE(net->ipv4.sysctl_tcp_fastopen) &
3961 TFO_CLIENT_ENABLE) {
3962 if (sk->sk_state == TCP_CLOSE)
3963 tp->fastopen_connect = val;
3964 else
3965 err = -EINVAL;
3966 } else {
3967 err = -EOPNOTSUPP;
3968 }
3969 break;
3970 case TCP_FASTOPEN_NO_COOKIE:
3971 if (val > 1 || val < 0)
3972 err = -EINVAL;
3973 else if (!((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN)))
3974 err = -EINVAL;
3975 else
3976 tp->fastopen_no_cookie = val;
3977 break;
3978 case TCP_TIMESTAMP:
3979 if (!tp->repair) {
3980 err = -EPERM;
3981 break;
3982 }
3983		/* val is an opaque field, and its low-order bit contains the
3984		 * usec_ts enable bit.
3985		 * It's a best effort, and we do not care if the user makes an error.
3986 */
3987 tp->tcp_usec_ts = val & 1;
3988 WRITE_ONCE(tp->tsoffset, val - tcp_clock_ts(tp->tcp_usec_ts));
3989 break;
3990 case TCP_REPAIR_WINDOW:
3991 err = tcp_repair_set_window(tp, optval, optlen);
3992 break;
3993 case TCP_NOTSENT_LOWAT:
3994 WRITE_ONCE(tp->notsent_lowat, val);
3995 sk->sk_write_space(sk);
3996 break;
3997 case TCP_INQ:
3998 if (val > 1 || val < 0)
3999 err = -EINVAL;
4000 else
4001 tp->recvmsg_inq = val;
4002 break;
4003 case TCP_TX_DELAY:
4004 if (val)
4005 tcp_enable_tx_delay();
4006 WRITE_ONCE(tp->tcp_tx_delay, val);
4007 break;
4008 default:
4009 err = -ENOPROTOOPT;
4010 break;
4011 }
4012
4013 sockopt_release_sock(sk);
4014 return err;
4015}
4016
4017int tcp_setsockopt(struct sock *sk, int level, int optname, sockptr_t optval,
4018 unsigned int optlen)
4019{
4020 const struct inet_connection_sock *icsk = inet_csk(sk);
4021
4022 if (level != SOL_TCP)
4023 /* Paired with WRITE_ONCE() in do_ipv6_setsockopt() and tcp_v6_connect() */
4024 return READ_ONCE(icsk->icsk_af_ops)->setsockopt(sk, level, optname,
4025 optval, optlen);
4026 return do_tcp_setsockopt(sk, level, optname, optval, optlen);
4027}
4028EXPORT_SYMBOL(tcp_setsockopt);
4029
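/* Convert the jiffies-based chrono counters (busy / rwnd-limited /
 * sndbuf-limited) into microseconds, crediting the currently-running chrono
 * period up to "now" as well.
 */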
4030static void tcp_get_info_chrono_stats(const struct tcp_sock *tp,
4031 struct tcp_info *info)
4032{
4033 u64 stats[__TCP_CHRONO_MAX], total = 0;
4034 enum tcp_chrono i;
4035
4036 for (i = TCP_CHRONO_BUSY; i < __TCP_CHRONO_MAX; ++i) {
4037 stats[i] = tp->chrono_stat[i - 1];
4038 if (i == tp->chrono_type)
4039 stats[i] += tcp_jiffies32 - tp->chrono_start;
4040 stats[i] *= USEC_PER_SEC / HZ;
4041 total += stats[i];
4042 }
4043
4044 info->tcpi_busy_time = total;
4045 info->tcpi_rwnd_limited = stats[TCP_CHRONO_RWND_LIMITED];
4046 info->tcpi_sndbuf_limited = stats[TCP_CHRONO_SNDBUF_LIMITED];
4047}
4048
4049/* Return information about the state of a TCP endpoint in API format. */
4050void tcp_get_info(struct sock *sk, struct tcp_info *info)
4051{
4052 const struct tcp_sock *tp = tcp_sk(sk); /* iff sk_type == SOCK_STREAM */
4053 const struct inet_connection_sock *icsk = inet_csk(sk);
4054 unsigned long rate;
4055 u32 now;
4056 u64 rate64;
4057 bool slow;
4058
4059 memset(info, 0, sizeof(*info));
4060 if (sk->sk_type != SOCK_STREAM)
4061 return;
4062
4063 info->tcpi_state = inet_sk_state_load(sk);
4064
4065 /* Report meaningful fields for all TCP states, including listeners */
4066 rate = READ_ONCE(sk->sk_pacing_rate);
4067 rate64 = (rate != ~0UL) ? rate : ~0ULL;
4068 info->tcpi_pacing_rate = rate64;
4069
4070 rate = READ_ONCE(sk->sk_max_pacing_rate);
4071 rate64 = (rate != ~0UL) ? rate : ~0ULL;
4072 info->tcpi_max_pacing_rate = rate64;
4073
4074 info->tcpi_reordering = tp->reordering;
4075 info->tcpi_snd_cwnd = tcp_snd_cwnd(tp);
4076
4077 if (info->tcpi_state == TCP_LISTEN) {
4078		/* Listener aliased fields:
4079		 * tcpi_unacked -> number of children ready for accept()
4080		 * tcpi_sacked -> max backlog
4081 */
4082 info->tcpi_unacked = READ_ONCE(sk->sk_ack_backlog);
4083 info->tcpi_sacked = READ_ONCE(sk->sk_max_ack_backlog);
4084 return;
4085 }
4086
4087 slow = lock_sock_fast(sk);
4088
4089 info->tcpi_ca_state = icsk->icsk_ca_state;
4090 info->tcpi_retransmits = icsk->icsk_retransmits;
4091 info->tcpi_probes = icsk->icsk_probes_out;
4092 info->tcpi_backoff = icsk->icsk_backoff;
4093
4094 if (tp->rx_opt.tstamp_ok)
4095 info->tcpi_options |= TCPI_OPT_TIMESTAMPS;
4096 if (tcp_is_sack(tp))
4097 info->tcpi_options |= TCPI_OPT_SACK;
4098 if (tp->rx_opt.wscale_ok) {
4099 info->tcpi_options |= TCPI_OPT_WSCALE;
4100 info->tcpi_snd_wscale = tp->rx_opt.snd_wscale;
4101 info->tcpi_rcv_wscale = tp->rx_opt.rcv_wscale;
4102 }
4103
4104 if (tp->ecn_flags & TCP_ECN_OK)
4105 info->tcpi_options |= TCPI_OPT_ECN;
4106 if (tp->ecn_flags & TCP_ECN_SEEN)
4107 info->tcpi_options |= TCPI_OPT_ECN_SEEN;
4108 if (tp->syn_data_acked)
4109 info->tcpi_options |= TCPI_OPT_SYN_DATA;
4110 if (tp->tcp_usec_ts)
4111 info->tcpi_options |= TCPI_OPT_USEC_TS;
4112
4113 info->tcpi_rto = jiffies_to_usecs(icsk->icsk_rto);
4114 info->tcpi_ato = jiffies_to_usecs(min_t(u32, icsk->icsk_ack.ato,
4115 tcp_delack_max(sk)));
4116 info->tcpi_snd_mss = tp->mss_cache;
4117 info->tcpi_rcv_mss = icsk->icsk_ack.rcv_mss;
4118
4119 info->tcpi_unacked = tp->packets_out;
4120 info->tcpi_sacked = tp->sacked_out;
4121
4122 info->tcpi_lost = tp->lost_out;
4123 info->tcpi_retrans = tp->retrans_out;
4124
4125 now = tcp_jiffies32;
4126 info->tcpi_last_data_sent = jiffies_to_msecs(now - tp->lsndtime);
4127 info->tcpi_last_data_recv = jiffies_to_msecs(now - icsk->icsk_ack.lrcvtime);
4128 info->tcpi_last_ack_recv = jiffies_to_msecs(now - tp->rcv_tstamp);
4129
4130 info->tcpi_pmtu = icsk->icsk_pmtu_cookie;
4131 info->tcpi_rcv_ssthresh = tp->rcv_ssthresh;
4132 info->tcpi_rtt = tp->srtt_us >> 3;
4133 info->tcpi_rttvar = tp->mdev_us >> 2;
4134 info->tcpi_snd_ssthresh = tp->snd_ssthresh;
4135 info->tcpi_advmss = tp->advmss;
4136
4137 info->tcpi_rcv_rtt = tp->rcv_rtt_est.rtt_us >> 3;
4138 info->tcpi_rcv_space = tp->rcvq_space.space;
4139
4140 info->tcpi_total_retrans = tp->total_retrans;
4141
4142 info->tcpi_bytes_acked = tp->bytes_acked;
4143 info->tcpi_bytes_received = tp->bytes_received;
4144 info->tcpi_notsent_bytes = max_t(int, 0, tp->write_seq - tp->snd_nxt);
4145 tcp_get_info_chrono_stats(tp, info);
4146
4147 info->tcpi_segs_out = tp->segs_out;
4148
4149 /* segs_in and data_segs_in can be updated from tcp_segs_in() from BH */
4150 info->tcpi_segs_in = READ_ONCE(tp->segs_in);
4151 info->tcpi_data_segs_in = READ_ONCE(tp->data_segs_in);
4152
4153 info->tcpi_min_rtt = tcp_min_rtt(tp);
4154 info->tcpi_data_segs_out = tp->data_segs_out;
4155
4156 info->tcpi_delivery_rate_app_limited = tp->rate_app_limited ? 1 : 0;
4157 rate64 = tcp_compute_delivery_rate(tp);
4158 if (rate64)
4159 info->tcpi_delivery_rate = rate64;
4160 info->tcpi_delivered = tp->delivered;
4161 info->tcpi_delivered_ce = tp->delivered_ce;
4162 info->tcpi_bytes_sent = tp->bytes_sent;
4163 info->tcpi_bytes_retrans = tp->bytes_retrans;
4164 info->tcpi_dsack_dups = tp->dsack_dups;
4165 info->tcpi_reord_seen = tp->reord_seen;
4166 info->tcpi_rcv_ooopack = tp->rcv_ooopack;
4167 info->tcpi_snd_wnd = tp->snd_wnd;
4168 info->tcpi_rcv_wnd = tp->rcv_wnd;
4169 info->tcpi_rehash = tp->plb_rehash + tp->timeout_rehash;
4170 info->tcpi_fastopen_client_fail = tp->fastopen_client_fail;
4171
4172 info->tcpi_total_rto = tp->total_rto;
4173 info->tcpi_total_rto_recoveries = tp->total_rto_recoveries;
4174 info->tcpi_total_rto_time = tp->total_rto_time;
4175 if (tp->rto_stamp)
4176 info->tcpi_total_rto_time += tcp_clock_ms() - tp->rto_stamp;
4177
4178 unlock_sock_fast(sk, slow);
4179}
4180EXPORT_SYMBOL_GPL(tcp_get_info);
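
/* Userspace sketch (illustration only, not kernel code): reading the snapshot
 * built above with getsockopt(TCP_INFO). Field names come from the uapi
 * struct tcp_info; the printf is just an example.
 *
 *	struct tcp_info ti;
 *	socklen_t len = sizeof(ti);
 *
 *	if (getsockopt(fd, IPPROTO_TCP, TCP_INFO, &ti, &len) == 0)
 *		printf("rtt=%uus cwnd=%u retrans=%u\n",
 *		       ti.tcpi_rtt, ti.tcpi_snd_cwnd, ti.tcpi_total_retrans);
 */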
4181
4182static size_t tcp_opt_stats_get_size(void)
4183{
4184 return
4185 nla_total_size_64bit(sizeof(u64)) + /* TCP_NLA_BUSY */
4186 nla_total_size_64bit(sizeof(u64)) + /* TCP_NLA_RWND_LIMITED */
4187 nla_total_size_64bit(sizeof(u64)) + /* TCP_NLA_SNDBUF_LIMITED */
4188 nla_total_size_64bit(sizeof(u64)) + /* TCP_NLA_DATA_SEGS_OUT */
4189 nla_total_size_64bit(sizeof(u64)) + /* TCP_NLA_TOTAL_RETRANS */
4190 nla_total_size_64bit(sizeof(u64)) + /* TCP_NLA_PACING_RATE */
4191 nla_total_size_64bit(sizeof(u64)) + /* TCP_NLA_DELIVERY_RATE */
4192 nla_total_size(sizeof(u32)) + /* TCP_NLA_SND_CWND */
4193 nla_total_size(sizeof(u32)) + /* TCP_NLA_REORDERING */
4194 nla_total_size(sizeof(u32)) + /* TCP_NLA_MIN_RTT */
4195 nla_total_size(sizeof(u8)) + /* TCP_NLA_RECUR_RETRANS */
4196 nla_total_size(sizeof(u8)) + /* TCP_NLA_DELIVERY_RATE_APP_LMT */
4197 nla_total_size(sizeof(u32)) + /* TCP_NLA_SNDQ_SIZE */
4198 nla_total_size(sizeof(u8)) + /* TCP_NLA_CA_STATE */
4199 nla_total_size(sizeof(u32)) + /* TCP_NLA_SND_SSTHRESH */
4200 nla_total_size(sizeof(u32)) + /* TCP_NLA_DELIVERED */
4201 nla_total_size(sizeof(u32)) + /* TCP_NLA_DELIVERED_CE */
4202 nla_total_size_64bit(sizeof(u64)) + /* TCP_NLA_BYTES_SENT */
4203 nla_total_size_64bit(sizeof(u64)) + /* TCP_NLA_BYTES_RETRANS */
4204 nla_total_size(sizeof(u32)) + /* TCP_NLA_DSACK_DUPS */
4205 nla_total_size(sizeof(u32)) + /* TCP_NLA_REORD_SEEN */
4206 nla_total_size(sizeof(u32)) + /* TCP_NLA_SRTT */
4207 nla_total_size(sizeof(u16)) + /* TCP_NLA_TIMEOUT_REHASH */
4208 nla_total_size(sizeof(u32)) + /* TCP_NLA_BYTES_NOTSENT */
4209 nla_total_size_64bit(sizeof(u64)) + /* TCP_NLA_EDT */
4210 nla_total_size(sizeof(u8)) + /* TCP_NLA_TTL */
4211 nla_total_size(sizeof(u32)) + /* TCP_NLA_REHASH */
4212 0;
4213}
4214
4215/* Returns TTL or hop limit of an incoming packet from skb. */
4216static u8 tcp_skb_ttl_or_hop_limit(const struct sk_buff *skb)
4217{
4218 if (skb->protocol == htons(ETH_P_IP))
4219 return ip_hdr(skb)->ttl;
4220 else if (skb->protocol == htons(ETH_P_IPV6))
4221 return ipv6_hdr(skb)->hop_limit;
4222 else
4223 return 0;
4224}
4225
4226struct sk_buff *tcp_get_timestamping_opt_stats(const struct sock *sk,
4227 const struct sk_buff *orig_skb,
4228 const struct sk_buff *ack_skb)
4229{
4230 const struct tcp_sock *tp = tcp_sk(sk);
4231 struct sk_buff *stats;
4232 struct tcp_info info;
4233 unsigned long rate;
4234 u64 rate64;
4235
4236 stats = alloc_skb(tcp_opt_stats_get_size(), GFP_ATOMIC);
4237 if (!stats)
4238 return NULL;
4239
4240 tcp_get_info_chrono_stats(tp, &info);
4241 nla_put_u64_64bit(stats, TCP_NLA_BUSY,
4242 info.tcpi_busy_time, TCP_NLA_PAD);
4243 nla_put_u64_64bit(stats, TCP_NLA_RWND_LIMITED,
4244 info.tcpi_rwnd_limited, TCP_NLA_PAD);
4245 nla_put_u64_64bit(stats, TCP_NLA_SNDBUF_LIMITED,
4246 info.tcpi_sndbuf_limited, TCP_NLA_PAD);
4247 nla_put_u64_64bit(stats, TCP_NLA_DATA_SEGS_OUT,
4248 tp->data_segs_out, TCP_NLA_PAD);
4249 nla_put_u64_64bit(stats, TCP_NLA_TOTAL_RETRANS,
4250 tp->total_retrans, TCP_NLA_PAD);
4251
4252 rate = READ_ONCE(sk->sk_pacing_rate);
4253 rate64 = (rate != ~0UL) ? rate : ~0ULL;
4254 nla_put_u64_64bit(stats, TCP_NLA_PACING_RATE, rate64, TCP_NLA_PAD);
4255
4256 rate64 = tcp_compute_delivery_rate(tp);
4257 nla_put_u64_64bit(stats, TCP_NLA_DELIVERY_RATE, rate64, TCP_NLA_PAD);
4258
4259 nla_put_u32(stats, TCP_NLA_SND_CWND, tcp_snd_cwnd(tp));
4260 nla_put_u32(stats, TCP_NLA_REORDERING, tp->reordering);
4261 nla_put_u32(stats, TCP_NLA_MIN_RTT, tcp_min_rtt(tp));
4262
4263 nla_put_u8(stats, TCP_NLA_RECUR_RETRANS, inet_csk(sk)->icsk_retransmits);
4264 nla_put_u8(stats, TCP_NLA_DELIVERY_RATE_APP_LMT, !!tp->rate_app_limited);
4265 nla_put_u32(stats, TCP_NLA_SND_SSTHRESH, tp->snd_ssthresh);
4266 nla_put_u32(stats, TCP_NLA_DELIVERED, tp->delivered);
4267 nla_put_u32(stats, TCP_NLA_DELIVERED_CE, tp->delivered_ce);
4268
4269 nla_put_u32(stats, TCP_NLA_SNDQ_SIZE, tp->write_seq - tp->snd_una);
4270 nla_put_u8(stats, TCP_NLA_CA_STATE, inet_csk(sk)->icsk_ca_state);
4271
4272 nla_put_u64_64bit(stats, TCP_NLA_BYTES_SENT, tp->bytes_sent,
4273 TCP_NLA_PAD);
4274 nla_put_u64_64bit(stats, TCP_NLA_BYTES_RETRANS, tp->bytes_retrans,
4275 TCP_NLA_PAD);
4276 nla_put_u32(stats, TCP_NLA_DSACK_DUPS, tp->dsack_dups);
4277 nla_put_u32(stats, TCP_NLA_REORD_SEEN, tp->reord_seen);
4278 nla_put_u32(stats, TCP_NLA_SRTT, tp->srtt_us >> 3);
4279 nla_put_u16(stats, TCP_NLA_TIMEOUT_REHASH, tp->timeout_rehash);
4280 nla_put_u32(stats, TCP_NLA_BYTES_NOTSENT,
4281 max_t(int, 0, tp->write_seq - tp->snd_nxt));
4282 nla_put_u64_64bit(stats, TCP_NLA_EDT, orig_skb->skb_mstamp_ns,
4283 TCP_NLA_PAD);
4284 if (ack_skb)
4285 nla_put_u8(stats, TCP_NLA_TTL,
4286 tcp_skb_ttl_or_hop_limit(ack_skb));
4287
4288 nla_put_u32(stats, TCP_NLA_REHASH, tp->plb_rehash + tp->timeout_rehash);
4289 return stats;
4290}
4291
4292int do_tcp_getsockopt(struct sock *sk, int level,
4293 int optname, sockptr_t optval, sockptr_t optlen)
4294{
4295 struct inet_connection_sock *icsk = inet_csk(sk);
4296 struct tcp_sock *tp = tcp_sk(sk);
4297 struct net *net = sock_net(sk);
4298 int val, len;
4299
4300 if (copy_from_sockptr(&len, optlen, sizeof(int)))
4301 return -EFAULT;
4302
4303 if (len < 0)
4304 return -EINVAL;
4305
4306 len = min_t(unsigned int, len, sizeof(int));
4307
4308 switch (optname) {
4309 case TCP_MAXSEG:
4310 val = tp->mss_cache;
4311 if (tp->rx_opt.user_mss &&
4312 ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN)))
4313 val = tp->rx_opt.user_mss;
4314 if (tp->repair)
4315 val = tp->rx_opt.mss_clamp;
4316 break;
4317 case TCP_NODELAY:
4318 val = !!(tp->nonagle&TCP_NAGLE_OFF);
4319 break;
4320 case TCP_CORK:
4321 val = !!(tp->nonagle&TCP_NAGLE_CORK);
4322 break;
4323 case TCP_KEEPIDLE:
4324 val = keepalive_time_when(tp) / HZ;
4325 break;
4326 case TCP_KEEPINTVL:
4327 val = keepalive_intvl_when(tp) / HZ;
4328 break;
4329 case TCP_KEEPCNT:
4330 val = keepalive_probes(tp);
4331 break;
4332 case TCP_SYNCNT:
4333 val = READ_ONCE(icsk->icsk_syn_retries) ? :
4334 READ_ONCE(net->ipv4.sysctl_tcp_syn_retries);
4335 break;
4336 case TCP_LINGER2:
4337 val = READ_ONCE(tp->linger2);
4338 if (val >= 0)
4339 val = (val ? : READ_ONCE(net->ipv4.sysctl_tcp_fin_timeout)) / HZ;
4340 break;
4341 case TCP_DEFER_ACCEPT:
4342 val = READ_ONCE(icsk->icsk_accept_queue.rskq_defer_accept);
4343 val = retrans_to_secs(val, TCP_TIMEOUT_INIT / HZ,
4344 TCP_RTO_MAX / HZ);
4345 break;
4346 case TCP_WINDOW_CLAMP:
4347 val = READ_ONCE(tp->window_clamp);
4348 break;
4349 case TCP_INFO: {
4350 struct tcp_info info;
4351
4352 if (copy_from_sockptr(&len, optlen, sizeof(int)))
4353 return -EFAULT;
4354
4355 tcp_get_info(sk, &info);
4356
4357 len = min_t(unsigned int, len, sizeof(info));
4358 if (copy_to_sockptr(optlen, &len, sizeof(int)))
4359 return -EFAULT;
4360 if (copy_to_sockptr(optval, &info, len))
4361 return -EFAULT;
4362 return 0;
4363 }
4364 case TCP_CC_INFO: {
4365 const struct tcp_congestion_ops *ca_ops;
4366 union tcp_cc_info info;
4367 size_t sz = 0;
4368 int attr;
4369
4370 if (copy_from_sockptr(&len, optlen, sizeof(int)))
4371 return -EFAULT;
4372
4373 ca_ops = icsk->icsk_ca_ops;
4374 if (ca_ops && ca_ops->get_info)
4375 sz = ca_ops->get_info(sk, ~0U, &attr, &info);
4376
4377 len = min_t(unsigned int, len, sz);
4378 if (copy_to_sockptr(optlen, &len, sizeof(int)))
4379 return -EFAULT;
4380 if (copy_to_sockptr(optval, &info, len))
4381 return -EFAULT;
4382 return 0;
4383 }
4384 case TCP_QUICKACK:
4385 val = !inet_csk_in_pingpong_mode(sk);
4386 break;
4387
4388 case TCP_CONGESTION:
4389 if (copy_from_sockptr(&len, optlen, sizeof(int)))
4390 return -EFAULT;
4391 len = min_t(unsigned int, len, TCP_CA_NAME_MAX);
4392 if (copy_to_sockptr(optlen, &len, sizeof(int)))
4393 return -EFAULT;
4394 if (copy_to_sockptr(optval, icsk->icsk_ca_ops->name, len))
4395 return -EFAULT;
4396 return 0;
4397
4398 case TCP_ULP:
4399 if (copy_from_sockptr(&len, optlen, sizeof(int)))
4400 return -EFAULT;
4401 len = min_t(unsigned int, len, TCP_ULP_NAME_MAX);
4402 if (!icsk->icsk_ulp_ops) {
4403 len = 0;
4404 if (copy_to_sockptr(optlen, &len, sizeof(int)))
4405 return -EFAULT;
4406 return 0;
4407 }
4408 if (copy_to_sockptr(optlen, &len, sizeof(int)))
4409 return -EFAULT;
4410 if (copy_to_sockptr(optval, icsk->icsk_ulp_ops->name, len))
4411 return -EFAULT;
4412 return 0;
4413
4414 case TCP_FASTOPEN_KEY: {
4415 u64 key[TCP_FASTOPEN_KEY_BUF_LENGTH / sizeof(u64)];
4416 unsigned int key_len;
4417
4418 if (copy_from_sockptr(&len, optlen, sizeof(int)))
4419 return -EFAULT;
4420
4421 key_len = tcp_fastopen_get_cipher(net, icsk, key) *
4422 TCP_FASTOPEN_KEY_LENGTH;
4423 len = min_t(unsigned int, len, key_len);
4424 if (copy_to_sockptr(optlen, &len, sizeof(int)))
4425 return -EFAULT;
4426 if (copy_to_sockptr(optval, key, len))
4427 return -EFAULT;
4428 return 0;
4429 }
4430 case TCP_THIN_LINEAR_TIMEOUTS:
4431 val = tp->thin_lto;
4432 break;
4433
4434 case TCP_THIN_DUPACK:
4435 val = 0;
4436 break;
4437
4438 case TCP_REPAIR:
4439 val = tp->repair;
4440 break;
4441
4442 case TCP_REPAIR_QUEUE:
4443 if (tp->repair)
4444 val = tp->repair_queue;
4445 else
4446 return -EINVAL;
4447 break;
4448
4449 case TCP_REPAIR_WINDOW: {
4450 struct tcp_repair_window opt;
4451
4452 if (copy_from_sockptr(&len, optlen, sizeof(int)))
4453 return -EFAULT;
4454
4455 if (len != sizeof(opt))
4456 return -EINVAL;
4457
4458 if (!tp->repair)
4459 return -EPERM;
4460
4461 opt.snd_wl1 = tp->snd_wl1;
4462 opt.snd_wnd = tp->snd_wnd;
4463 opt.max_window = tp->max_window;
4464 opt.rcv_wnd = tp->rcv_wnd;
4465 opt.rcv_wup = tp->rcv_wup;
4466
4467 if (copy_to_sockptr(optval, &opt, len))
4468 return -EFAULT;
4469 return 0;
4470 }
4471 case TCP_QUEUE_SEQ:
4472 if (tp->repair_queue == TCP_SEND_QUEUE)
4473 val = tp->write_seq;
4474 else if (tp->repair_queue == TCP_RECV_QUEUE)
4475 val = tp->rcv_nxt;
4476 else
4477 return -EINVAL;
4478 break;
4479
4480 case TCP_USER_TIMEOUT:
4481 val = READ_ONCE(icsk->icsk_user_timeout);
4482 break;
4483
4484 case TCP_FASTOPEN:
4485 val = READ_ONCE(icsk->icsk_accept_queue.fastopenq.max_qlen);
4486 break;
4487
4488 case TCP_FASTOPEN_CONNECT:
4489 val = tp->fastopen_connect;
4490 break;
4491
4492 case TCP_FASTOPEN_NO_COOKIE:
4493 val = tp->fastopen_no_cookie;
4494 break;
4495
4496 case TCP_TX_DELAY:
4497 val = READ_ONCE(tp->tcp_tx_delay);
4498 break;
4499
4500 case TCP_TIMESTAMP:
4501 val = tcp_clock_ts(tp->tcp_usec_ts) + READ_ONCE(tp->tsoffset);
4502 if (tp->tcp_usec_ts)
4503 val |= 1;
4504 else
4505 val &= ~1;
4506 break;
4507 case TCP_NOTSENT_LOWAT:
4508 val = READ_ONCE(tp->notsent_lowat);
4509 break;
4510 case TCP_INQ:
4511 val = tp->recvmsg_inq;
4512 break;
4513 case TCP_SAVE_SYN:
4514 val = tp->save_syn;
4515 break;
4516 case TCP_SAVED_SYN: {
4517 if (copy_from_sockptr(&len, optlen, sizeof(int)))
4518 return -EFAULT;
4519
4520 sockopt_lock_sock(sk);
4521 if (tp->saved_syn) {
4522 if (len < tcp_saved_syn_len(tp->saved_syn)) {
4523 len = tcp_saved_syn_len(tp->saved_syn);
4524 if (copy_to_sockptr(optlen, &len, sizeof(int))) {
4525 sockopt_release_sock(sk);
4526 return -EFAULT;
4527 }
4528 sockopt_release_sock(sk);
4529 return -EINVAL;
4530 }
4531 len = tcp_saved_syn_len(tp->saved_syn);
4532 if (copy_to_sockptr(optlen, &len, sizeof(int))) {
4533 sockopt_release_sock(sk);
4534 return -EFAULT;
4535 }
4536 if (copy_to_sockptr(optval, tp->saved_syn->data, len)) {
4537 sockopt_release_sock(sk);
4538 return -EFAULT;
4539 }
4540 tcp_saved_syn_free(tp);
4541 sockopt_release_sock(sk);
4542 } else {
4543 sockopt_release_sock(sk);
4544 len = 0;
4545 if (copy_to_sockptr(optlen, &len, sizeof(int)))
4546 return -EFAULT;
4547 }
4548 return 0;
4549 }
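	/* A minimal userspace sketch (illustrative only) of the two-step
	 * pattern supported above, assuming TCP_SAVE_SYN was enabled before
	 * the connection was established: probe with a zero length to learn
	 * the required size (the call fails with EINVAL but updates optlen),
	 * then fetch the headers. A successful read frees the saved SYN, so
	 * it can be retrieved only once.
	 *
	 *	char syn[512];
	 *	socklen_t slen = 0;
	 *
	 *	getsockopt(fd, IPPROTO_TCP, TCP_SAVED_SYN, syn, &slen);
	 *	if (slen && slen <= sizeof(syn))
	 *		getsockopt(fd, IPPROTO_TCP, TCP_SAVED_SYN, syn, &slen);
	 */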
4550#ifdef CONFIG_MMU
4551 case TCP_ZEROCOPY_RECEIVE: {
4552 struct scm_timestamping_internal tss;
4553 struct tcp_zerocopy_receive zc = {};
4554 int err;
4555
4556 if (copy_from_sockptr(&len, optlen, sizeof(int)))
4557 return -EFAULT;
4558 if (len < 0 ||
4559 len < offsetofend(struct tcp_zerocopy_receive, length))
4560 return -EINVAL;
4561 if (unlikely(len > sizeof(zc))) {
4562 err = check_zeroed_sockptr(optval, sizeof(zc),
4563 len - sizeof(zc));
4564 if (err < 1)
4565 return err == 0 ? -EINVAL : err;
4566 len = sizeof(zc);
4567 if (copy_to_sockptr(optlen, &len, sizeof(int)))
4568 return -EFAULT;
4569 }
4570 if (copy_from_sockptr(&zc, optval, len))
4571 return -EFAULT;
4572 if (zc.reserved)
4573 return -EINVAL;
4574 if (zc.msg_flags & ~(TCP_VALID_ZC_MSG_FLAGS))
4575 return -EINVAL;
4576 sockopt_lock_sock(sk);
4577 err = tcp_zerocopy_receive(sk, &zc, &tss);
4578 err = BPF_CGROUP_RUN_PROG_GETSOCKOPT_KERN(sk, level, optname,
4579 &zc, &len, err);
4580 sockopt_release_sock(sk);
4581 if (len >= offsetofend(struct tcp_zerocopy_receive, msg_flags))
4582 goto zerocopy_rcv_cmsg;
4583 switch (len) {
4584 case offsetofend(struct tcp_zerocopy_receive, msg_flags):
4585 goto zerocopy_rcv_cmsg;
4586 case offsetofend(struct tcp_zerocopy_receive, msg_controllen):
4587 case offsetofend(struct tcp_zerocopy_receive, msg_control):
4588 case offsetofend(struct tcp_zerocopy_receive, flags):
4589 case offsetofend(struct tcp_zerocopy_receive, copybuf_len):
4590 case offsetofend(struct tcp_zerocopy_receive, copybuf_address):
4591 case offsetofend(struct tcp_zerocopy_receive, err):
4592 goto zerocopy_rcv_sk_err;
4593 case offsetofend(struct tcp_zerocopy_receive, inq):
4594 goto zerocopy_rcv_inq;
4595 case offsetofend(struct tcp_zerocopy_receive, length):
4596 default:
4597 goto zerocopy_rcv_out;
4598 }
4599zerocopy_rcv_cmsg:
4600 if (zc.msg_flags & TCP_CMSG_TS)
4601 tcp_zc_finalize_rx_tstamp(sk, &zc, &tss);
4602 else
4603 zc.msg_flags = 0;
4604zerocopy_rcv_sk_err:
4605 if (!err)
4606 zc.err = sock_error(sk);
4607zerocopy_rcv_inq:
4608 zc.inq = tcp_inq_hint(sk);
4609zerocopy_rcv_out:
4610 if (!err && copy_to_sockptr(optval, &zc, len))
4611 err = -EFAULT;
4612 return err;
4613 }
4614#endif
4615 case TCP_AO_REPAIR:
4616 if (!tcp_can_repair_sock(sk))
4617 return -EPERM;
4618 return tcp_ao_get_repair(sk, optval, optlen);
4619 case TCP_AO_GET_KEYS:
4620 case TCP_AO_INFO: {
4621 int err;
4622
4623 sockopt_lock_sock(sk);
4624 if (optname == TCP_AO_GET_KEYS)
4625 err = tcp_ao_get_mkts(sk, optval, optlen);
4626 else
4627 err = tcp_ao_get_sock_info(sk, optval, optlen);
4628 sockopt_release_sock(sk);
4629
4630 return err;
4631 }
4632 case TCP_IS_MPTCP:
4633 val = 0;
4634 break;
4635 default:
4636 return -ENOPROTOOPT;
4637 }
4638
4639 if (copy_to_sockptr(optlen, &len, sizeof(int)))
4640 return -EFAULT;
4641 if (copy_to_sockptr(optval, &val, len))
4642 return -EFAULT;
4643 return 0;
4644}
4645
4646bool tcp_bpf_bypass_getsockopt(int level, int optname)
4647{
4648	/* TCP's do_tcp_getsockopt() has an optimized getsockopt implementation
4649	 * that avoids taking an extra socket lock for TCP_ZEROCOPY_RECEIVE.
4650	 */
4651 if (level == SOL_TCP && optname == TCP_ZEROCOPY_RECEIVE)
4652 return true;
4653
4654 return false;
4655}
4656EXPORT_SYMBOL(tcp_bpf_bypass_getsockopt);
4657
4658int tcp_getsockopt(struct sock *sk, int level, int optname, char __user *optval,
4659 int __user *optlen)
4660{
4661 struct inet_connection_sock *icsk = inet_csk(sk);
4662
4663 if (level != SOL_TCP)
4664 /* Paired with WRITE_ONCE() in do_ipv6_setsockopt() and tcp_v6_connect() */
4665 return READ_ONCE(icsk->icsk_af_ops)->getsockopt(sk, level, optname,
4666 optval, optlen);
4667 return do_tcp_getsockopt(sk, level, optname, USER_SOCKPTR(optval),
4668 USER_SOCKPTR(optlen));
4669}
4670EXPORT_SYMBOL(tcp_getsockopt);
4671
4672#ifdef CONFIG_TCP_MD5SIG
4673int tcp_md5_sigpool_id = -1;
4674EXPORT_SYMBOL_GPL(tcp_md5_sigpool_id);
4675
4676int tcp_md5_alloc_sigpool(void)
4677{
4678 size_t scratch_size;
4679 int ret;
4680
4681 scratch_size = sizeof(union tcp_md5sum_block) + sizeof(struct tcphdr);
4682 ret = tcp_sigpool_alloc_ahash("md5", scratch_size);
4683 if (ret >= 0) {
4684		/* As long as any MD5 sigpool is allocated, the returned
4685		 * id stays the same. Rewrite the id only for the case where
4686		 * all MD5 keys were previously deleted and this call
4687		 * allocates the first key again, which may yield a different
4688		 * sigpool id than was used before.
4689		 */
4690		WRITE_ONCE(tcp_md5_sigpool_id, ret); /* WRITE_ONCE() avoids store tearing; readers use READ_ONCE() */
4691 return 0;
4692 }
4693 return ret;
4694}
4695
4696void tcp_md5_release_sigpool(void)
4697{
4698 tcp_sigpool_release(READ_ONCE(tcp_md5_sigpool_id));
4699}
4700
4701void tcp_md5_add_sigpool(void)
4702{
4703 tcp_sigpool_get(READ_ONCE(tcp_md5_sigpool_id));
4704}
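/* The three helpers above are thin wrappers around the shared tcp_sigpool
 * reference counting: tcp_md5_alloc_sigpool() allocates (or reuses) the MD5
 * pool and publishes its id, tcp_md5_add_sigpool() takes an extra reference
 * on the already-published pool, and tcp_md5_release_sigpool() drops one.
 * Each alloc/add is expected to be balanced by a release when the
 * corresponding MD5 key goes away.
 */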
4705
4706int tcp_md5_hash_key(struct tcp_sigpool *hp,
4707 const struct tcp_md5sig_key *key)
4708{
4709 u8 keylen = READ_ONCE(key->keylen); /* paired with WRITE_ONCE() in tcp_md5_do_add */
4710 struct scatterlist sg;
4711
4712 sg_init_one(&sg, key->key, keylen);
4713 ahash_request_set_crypt(hp->req, &sg, NULL, keylen);
4714
4715 /* We use data_race() because tcp_md5_do_add() might change
4716 * key->key under us
4717 */
4718 return data_race(crypto_ahash_update(hp->req));
4719}
4720EXPORT_SYMBOL(tcp_md5_hash_key);
4721
4722/* Called with rcu_read_lock() */
4723static enum skb_drop_reason
4724tcp_inbound_md5_hash(const struct sock *sk, const struct sk_buff *skb,
4725 const void *saddr, const void *daddr,
4726 int family, int l3index, const __u8 *hash_location)
4727{
4728	/* This gets called for each TCP segment that carries a TCP-MD5 option.
4729	 * We have 3 drop cases:
4730	 * o No MD5 hash and one expected.
4731	 * o MD5 hash and we're not expecting one.
4732	 * o MD5 hash and it's wrong.
4733	 */
4734 const struct tcp_sock *tp = tcp_sk(sk);
4735 struct tcp_md5sig_key *key;
4736 u8 newhash[16];
4737 int genhash;
4738
4739 key = tcp_md5_do_lookup(sk, l3index, saddr, family);
4740
4741 if (!key && hash_location) {
4742 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
4743 trace_tcp_hash_md5_unexpected(sk, skb);
4744 return SKB_DROP_REASON_TCP_MD5UNEXPECTED;
4745 }
4746
4747 /* Check the signature.
4748 * To support dual stack listeners, we need to handle
4749 * IPv4-mapped case.
4750 */
4751 if (family == AF_INET)
4752 genhash = tcp_v4_md5_hash_skb(newhash, key, NULL, skb);
4753 else
4754 genhash = tp->af_specific->calc_md5_hash(newhash, key,
4755 NULL, skb);
4756 if (genhash || memcmp(hash_location, newhash, 16) != 0) {
4757 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5FAILURE);
4758 trace_tcp_hash_md5_mismatch(sk, skb);
4759 return SKB_DROP_REASON_TCP_MD5FAILURE;
4760 }
4761 return SKB_NOT_DROPPED_YET;
4762}
4763#else
4764static inline enum skb_drop_reason
4765tcp_inbound_md5_hash(const struct sock *sk, const struct sk_buff *skb,
4766 const void *saddr, const void *daddr,
4767 int family, int l3index, const __u8 *hash_location)
4768{
4769 return SKB_NOT_DROPPED_YET;
4770}
4771
4772#endif
4773
4774/* Called with rcu_read_lock() */
4775enum skb_drop_reason
4776tcp_inbound_hash(struct sock *sk, const struct request_sock *req,
4777 const struct sk_buff *skb,
4778 const void *saddr, const void *daddr,
4779 int family, int dif, int sdif)
4780{
4781 const struct tcphdr *th = tcp_hdr(skb);
4782 const struct tcp_ao_hdr *aoh;
4783 const __u8 *md5_location;
4784 int l3index;
4785
4786	/* Invalid option, or any auth option appearing more than once */
4787 if (tcp_parse_auth_options(th, &md5_location, &aoh)) {
4788 trace_tcp_hash_bad_header(sk, skb);
4789 return SKB_DROP_REASON_TCP_AUTH_HDR;
4790 }
4791
4792 if (req) {
4793 if (tcp_rsk_used_ao(req) != !!aoh) {
4794 u8 keyid, rnext, maclen;
4795
4796 if (aoh) {
4797 keyid = aoh->keyid;
4798 rnext = aoh->rnext_keyid;
4799 maclen = tcp_ao_hdr_maclen(aoh);
4800 } else {
4801 keyid = rnext = maclen = 0;
4802 }
4803
4804 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPAOBAD);
4805 trace_tcp_ao_handshake_failure(sk, skb, keyid, rnext, maclen);
4806 return SKB_DROP_REASON_TCP_AOFAILURE;
4807 }
4808 }
4809
4810	/* A non-zero sdif means the packet ingressed via a device
4811	 * in an L3 domain, and dif is set to the l3mdev
4812	 */
4813 l3index = sdif ? dif : 0;
4814
4815 /* Fast path: unsigned segments */
4816 if (likely(!md5_location && !aoh)) {
4817 /* Drop if there's TCP-MD5 or TCP-AO key with any rcvid/sndid
4818 * for the remote peer. On TCP-AO established connection
4819 * the last key is impossible to remove, so there's
4820 * always at least one current_key.
4821 */
4822 if (tcp_ao_required(sk, saddr, family, l3index, true)) {
4823 trace_tcp_hash_ao_required(sk, skb);
4824 return SKB_DROP_REASON_TCP_AONOTFOUND;
4825 }
4826 if (unlikely(tcp_md5_do_lookup(sk, l3index, saddr, family))) {
4827 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
4828 trace_tcp_hash_md5_required(sk, skb);
4829 return SKB_DROP_REASON_TCP_MD5NOTFOUND;
4830 }
4831 return SKB_NOT_DROPPED_YET;
4832 }
4833
4834 if (aoh)
4835 return tcp_inbound_ao_hash(sk, skb, family, req, l3index, aoh);
4836
4837 return tcp_inbound_md5_hash(sk, skb, saddr, daddr, family,
4838 l3index, md5_location);
4839}
4840EXPORT_SYMBOL_GPL(tcp_inbound_hash);
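/* Informal summary of the dispatch above, as implemented: segments whose
 * auth options fail to parse are dropped; unsigned segments are accepted
 * unless a TCP-AO or TCP-MD5 key is configured for the peer; AO-signed
 * segments are verified by tcp_inbound_ao_hash() and MD5-signed ones by
 * tcp_inbound_md5_hash().
 */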
4841
4842void tcp_done(struct sock *sk)
4843{
4844 struct request_sock *req;
4845
4846	/* We might be called with a new socket, after
4847	 * inet_csk_prepare_forced_close() has been called,
4848	 * so we cannot use lockdep_sock_is_held(sk).
4849	 */
4850 req = rcu_dereference_protected(tcp_sk(sk)->fastopen_rsk, 1);
4851
4852 if (sk->sk_state == TCP_SYN_SENT || sk->sk_state == TCP_SYN_RECV)
4853 TCP_INC_STATS(sock_net(sk), TCP_MIB_ATTEMPTFAILS);
4854
4855 tcp_set_state(sk, TCP_CLOSE);
4856 tcp_clear_xmit_timers(sk);
4857 if (req)
4858 reqsk_fastopen_remove(sk, req, false);
4859
4860 WRITE_ONCE(sk->sk_shutdown, SHUTDOWN_MASK);
4861
4862 if (!sock_flag(sk, SOCK_DEAD))
4863 sk->sk_state_change(sk);
4864 else
4865 inet_csk_destroy_sock(sk);
4866}
4867EXPORT_SYMBOL_GPL(tcp_done);
4868
4869int tcp_abort(struct sock *sk, int err)
4870{
4871 int state = inet_sk_state_load(sk);
4872
4873 if (state == TCP_NEW_SYN_RECV) {
4874 struct request_sock *req = inet_reqsk(sk);
4875
4876 local_bh_disable();
4877 inet_csk_reqsk_queue_drop(req->rsk_listener, req);
4878 local_bh_enable();
4879 return 0;
4880 }
4881 if (state == TCP_TIME_WAIT) {
4882 struct inet_timewait_sock *tw = inet_twsk(sk);
4883
4884 refcount_inc(&tw->tw_refcnt);
4885 local_bh_disable();
4886 inet_twsk_deschedule_put(tw);
4887 local_bh_enable();
4888 return 0;
4889 }
4890
4891 /* BPF context ensures sock locking. */
4892 if (!has_current_bpf_ctx())
4893 /* Don't race with userspace socket closes such as tcp_close. */
4894 lock_sock(sk);
4895
4896 /* Avoid closing the same socket twice. */
4897 if (sk->sk_state == TCP_CLOSE) {
4898 if (!has_current_bpf_ctx())
4899 release_sock(sk);
4900 return -ENOENT;
4901 }
4902
4903 if (sk->sk_state == TCP_LISTEN) {
4904 tcp_set_state(sk, TCP_CLOSE);
4905 inet_csk_listen_stop(sk);
4906 }
4907
4908 /* Don't race with BH socket closes such as inet_csk_listen_stop. */
4909 local_bh_disable();
4910 bh_lock_sock(sk);
4911
4912 if (tcp_need_reset(sk->sk_state))
4913 tcp_send_active_reset(sk, GFP_ATOMIC,
4914 SK_RST_REASON_TCP_STATE);
4915 tcp_done_with_error(sk, err);
4916
4917 bh_unlock_sock(sk);
4918 local_bh_enable();
4919 if (!has_current_bpf_ctx())
4920 release_sock(sk);
4921 return 0;
4922}
4923EXPORT_SYMBOL_GPL(tcp_abort);
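/* Usage note (illustrative): tcp_abort() is the backend behind, for example,
 * the inet_diag SOCK_DESTROY operation (what `ss -K` issues) and the BPF
 * socket-destroy helper; such callers typically pass ECONNABORTED so the
 * owner of the socket observes an aborted connection.
 */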
4924
4925extern struct tcp_congestion_ops tcp_reno;
4926
4927static __initdata unsigned long thash_entries;
4928static int __init set_thash_entries(char *str)
4929{
4930 ssize_t ret;
4931
4932 if (!str)
4933 return 0;
4934
4935 ret = kstrtoul(str, 0, &thash_entries);
4936 if (ret)
4937 return 0;
4938
4939 return 1;
4940}
4941__setup("thash_entries=", set_thash_entries);
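/* Usage sketch (illustrative): the established-hash size can be pinned at
 * boot instead of being auto-sized from available memory by adding, e.g.,
 *
 *	thash_entries=131072
 *
 * to the kernel command line; leaving it at 0 keeps the automatic sizing
 * done in tcp_init() below.
 */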
4942
4943static void __init tcp_init_mem(void)
4944{
4945 unsigned long limit = nr_free_buffer_pages() / 16;
4946
4947 limit = max(limit, 128UL);
4948 sysctl_tcp_mem[0] = limit / 4 * 3; /* 4.68 % */
4949 sysctl_tcp_mem[1] = limit; /* 6.25 % */
4950 sysctl_tcp_mem[2] = sysctl_tcp_mem[0] * 2; /* 9.37 % */
4951}
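/* Worked example of the sizing above (illustrative): with 64 GiB of
 * buffer-usable memory and 4 KiB pages, nr_free_buffer_pages() is roughly
 * 16M pages, so limit = 1M pages and
 *
 *	tcp_mem[0] = 3/4 * limit ~ 786k pages (~3 GiB, ~4.7 % of memory)
 *	tcp_mem[1] =       limit ~   1M pages (~4 GiB, ~6.25 %)
 *	tcp_mem[2] = 3/2 * limit ~ 1.5M pages (~6 GiB, ~9.4 %)
 *
 * which matches the percentages noted in the comments.
 */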
4952
4953static void __init tcp_struct_check(void)
4954{
4955 /* TX read-mostly hotpath cache lines */
4956 CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_read_tx, max_window);
4957 CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_read_tx, rcv_ssthresh);
4958 CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_read_tx, reordering);
4959 CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_read_tx, notsent_lowat);
4960 CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_read_tx, gso_segs);
4961 CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_read_tx, lost_skb_hint);
4962 CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_read_tx, retransmit_skb_hint);
4963 CACHELINE_ASSERT_GROUP_SIZE(struct tcp_sock, tcp_sock_read_tx, 40);
4964
4965 /* TXRX read-mostly hotpath cache lines */
4966 CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_read_txrx, tsoffset);
4967 CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_read_txrx, snd_wnd);
4968 CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_read_txrx, mss_cache);
4969 CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_read_txrx, snd_cwnd);
4970 CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_read_txrx, prr_out);
4971 CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_read_txrx, lost_out);
4972 CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_read_txrx, sacked_out);
4973 CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_read_txrx, scaling_ratio);
4974 CACHELINE_ASSERT_GROUP_SIZE(struct tcp_sock, tcp_sock_read_txrx, 32);
4975
4976 /* RX read-mostly hotpath cache lines */
4977 CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_read_rx, copied_seq);
4978 CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_read_rx, rcv_tstamp);
4979 CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_read_rx, snd_wl1);
4980 CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_read_rx, tlp_high_seq);
4981 CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_read_rx, rttvar_us);
4982 CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_read_rx, retrans_out);
4983 CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_read_rx, advmss);
4984 CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_read_rx, urg_data);
4985 CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_read_rx, lost);
4986 CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_read_rx, rtt_min);
4987 CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_read_rx, out_of_order_queue);
4988 CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_read_rx, snd_ssthresh);
4989 CACHELINE_ASSERT_GROUP_SIZE(struct tcp_sock, tcp_sock_read_rx, 69);
4990
4991 /* TX read-write hotpath cache lines */
4992 CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_tx, segs_out);
4993 CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_tx, data_segs_out);
4994 CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_tx, bytes_sent);
4995 CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_tx, snd_sml);
4996 CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_tx, chrono_start);
4997 CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_tx, chrono_stat);
4998 CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_tx, write_seq);
4999 CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_tx, pushed_seq);
5000 CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_tx, lsndtime);
5001 CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_tx, mdev_us);
5002 CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_tx, tcp_wstamp_ns);
5003 CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_tx, rtt_seq);
5004 CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_tx, tsorted_sent_queue);
5005 CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_tx, highest_sack);
5006 CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_tx, ecn_flags);
5007 CACHELINE_ASSERT_GROUP_SIZE(struct tcp_sock, tcp_sock_write_tx, 89);
5008
5009 /* TXRX read-write hotpath cache lines */
5010 CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_txrx, pred_flags);
5011 CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_txrx, tcp_clock_cache);
5012 CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_txrx, tcp_mstamp);
5013 CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_txrx, rcv_nxt);
5014 CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_txrx, snd_nxt);
5015 CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_txrx, snd_una);
5016 CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_txrx, window_clamp);
5017 CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_txrx, srtt_us);
5018 CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_txrx, packets_out);
5019 CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_txrx, snd_up);
5020 CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_txrx, delivered);
5021 CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_txrx, delivered_ce);
5022 CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_txrx, app_limited);
5023 CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_txrx, rcv_wnd);
5024 CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_txrx, rx_opt);
5025
5026 /* 32bit arches with 8byte alignment on u64 fields might need padding
5027 * before tcp_clock_cache.
5028 */
5029 CACHELINE_ASSERT_GROUP_SIZE(struct tcp_sock, tcp_sock_write_txrx, 92 + 4);
5030
5031 /* RX read-write hotpath cache lines */
5032 CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_rx, bytes_received);
5033 CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_rx, segs_in);
5034 CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_rx, data_segs_in);
5035 CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_rx, rcv_wup);
5036 CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_rx, max_packets_out);
5037 CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_rx, cwnd_usage_seq);
5038 CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_rx, rate_delivered);
5039 CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_rx, rate_interval_us);
5040 CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_rx, rcv_rtt_last_tsecr);
5041 CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_rx, first_tx_mstamp);
5042 CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_rx, delivered_mstamp);
5043 CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_rx, bytes_acked);
5044 CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_rx, rcv_rtt_est);
5045 CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_rx, rcvq_space);
5046 CACHELINE_ASSERT_GROUP_SIZE(struct tcp_sock, tcp_sock_write_rx, 99);
5047}
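/* The checks above are purely compile-time: CACHELINE_ASSERT_GROUP_MEMBER()
 * verifies that a field still lives inside the named cache-line group of
 * struct tcp_sock, and CACHELINE_ASSERT_GROUP_SIZE() that the group has not
 * grown past its budgeted size, so accidental reordering or growth of hot
 * fields breaks the build instead of silently regressing performance.
 */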
5048
5049void __init tcp_init(void)
5050{
5051 int max_rshare, max_wshare, cnt;
5052 unsigned long limit;
5053 unsigned int i;
5054
5055 BUILD_BUG_ON(TCP_MIN_SND_MSS <= MAX_TCP_OPTION_SPACE);
5056 BUILD_BUG_ON(sizeof(struct tcp_skb_cb) >
5057 sizeof_field(struct sk_buff, cb));
5058
5059 tcp_struct_check();
5060
5061 percpu_counter_init(&tcp_sockets_allocated, 0, GFP_KERNEL);
5062
5063 timer_setup(&tcp_orphan_timer, tcp_orphan_update, TIMER_DEFERRABLE);
5064 mod_timer(&tcp_orphan_timer, jiffies + TCP_ORPHAN_TIMER_PERIOD);
5065
5066 inet_hashinfo2_init(&tcp_hashinfo, "tcp_listen_portaddr_hash",
5067 thash_entries, 21, /* one slot per 2 MB*/
5068 0, 64 * 1024);
5069 tcp_hashinfo.bind_bucket_cachep =
5070 kmem_cache_create("tcp_bind_bucket",
5071 sizeof(struct inet_bind_bucket), 0,
5072 SLAB_HWCACHE_ALIGN | SLAB_PANIC |
5073 SLAB_ACCOUNT,
5074 NULL);
5075 tcp_hashinfo.bind2_bucket_cachep =
5076 kmem_cache_create("tcp_bind2_bucket",
5077 sizeof(struct inet_bind2_bucket), 0,
5078 SLAB_HWCACHE_ALIGN | SLAB_PANIC |
5079 SLAB_ACCOUNT,
5080 NULL);
5081
5082 /* Size and allocate the main established and bind bucket
5083 * hash tables.
5084 *
5085 * The methodology is similar to that of the buffer cache.
5086 */
5087 tcp_hashinfo.ehash =
5088 alloc_large_system_hash("TCP established",
5089 sizeof(struct inet_ehash_bucket),
5090 thash_entries,
5091 17, /* one slot per 128 KB of memory */
5092 0,
5093 NULL,
5094 &tcp_hashinfo.ehash_mask,
5095 0,
5096 thash_entries ? 0 : 512 * 1024);
5097 for (i = 0; i <= tcp_hashinfo.ehash_mask; i++)
5098 INIT_HLIST_NULLS_HEAD(&tcp_hashinfo.ehash[i].chain, i);
5099
5100 if (inet_ehash_locks_alloc(&tcp_hashinfo))
5101 panic("TCP: failed to alloc ehash_locks");
5102 tcp_hashinfo.bhash =
5103 alloc_large_system_hash("TCP bind",
5104 2 * sizeof(struct inet_bind_hashbucket),
5105 tcp_hashinfo.ehash_mask + 1,
5106 17, /* one slot per 128 KB of memory */
5107 0,
5108 &tcp_hashinfo.bhash_size,
5109 NULL,
5110 0,
5111 64 * 1024);
5112 tcp_hashinfo.bhash_size = 1U << tcp_hashinfo.bhash_size;
5113 tcp_hashinfo.bhash2 = tcp_hashinfo.bhash + tcp_hashinfo.bhash_size;
5114 for (i = 0; i < tcp_hashinfo.bhash_size; i++) {
5115 spin_lock_init(&tcp_hashinfo.bhash[i].lock);
5116 INIT_HLIST_HEAD(&tcp_hashinfo.bhash[i].chain);
5117 spin_lock_init(&tcp_hashinfo.bhash2[i].lock);
5118 INIT_HLIST_HEAD(&tcp_hashinfo.bhash2[i].chain);
5119 }
5120
5121 tcp_hashinfo.pernet = false;
5122
5123 cnt = tcp_hashinfo.ehash_mask + 1;
5124 sysctl_tcp_max_orphans = cnt / 2;
5125
5126 tcp_init_mem();
5127 /* Set per-socket limits to no more than 1/128 the pressure threshold */
5128 limit = nr_free_buffer_pages() << (PAGE_SHIFT - 7);
5129 max_wshare = min(4UL*1024*1024, limit);
5130 max_rshare = min(6UL*1024*1024, limit);
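	/* Worked example (illustrative): on a 4 KiB-page machine with 16 GiB
	 * of buffer-usable memory, nr_free_buffer_pages() is roughly 4M
	 * pages, so limit = 4M << (12 - 7) = 128 MiB, i.e. 1/128 of memory.
	 * max_wshare and max_rshare are then clamped to 4 MiB and 6 MiB,
	 * which bound the tcp_wmem[2]/tcp_rmem[2] defaults set just below.
	 */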
5131
5132 init_net.ipv4.sysctl_tcp_wmem[0] = PAGE_SIZE;
5133 init_net.ipv4.sysctl_tcp_wmem[1] = 16*1024;
5134 init_net.ipv4.sysctl_tcp_wmem[2] = max(64*1024, max_wshare);
5135
5136 init_net.ipv4.sysctl_tcp_rmem[0] = PAGE_SIZE;
5137 init_net.ipv4.sysctl_tcp_rmem[1] = 131072;
5138 init_net.ipv4.sysctl_tcp_rmem[2] = max(131072, max_rshare);
5139
5140 pr_info("Hash tables configured (established %u bind %u)\n",
5141 tcp_hashinfo.ehash_mask + 1, tcp_hashinfo.bhash_size);
5142
5143 tcp_v4_init();
5144 tcp_metrics_init();
5145 BUG_ON(tcp_register_congestion_control(&tcp_reno) != 0);
5146 tcp_tasklet_init();
5147 mptcp_init();
5148}