v3.15 (net/ipv4/tcp_fastopen.c)
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/tcp.h>
#include <linux/rcupdate.h>
#include <linux/rculist.h>
#include <net/inetpeer.h>
#include <net/tcp.h>

int sysctl_tcp_fastopen __read_mostly = TFO_CLIENT_ENABLE;

struct tcp_fastopen_context __rcu *tcp_fastopen_ctx;

static DEFINE_SPINLOCK(tcp_fastopen_ctx_lock);

void tcp_fastopen_init_key_once(bool publish)
{
	static u8 key[TCP_FASTOPEN_KEY_LENGTH];

	/* tcp_fastopen_reset_cipher publishes the new context
	 * atomically, so we allow this race to happen here.
	 *
	 * All call sites of tcp_fastopen_cookie_gen also check
	 * for a valid cookie, so this is an acceptable risk.
	 */
	if (net_get_random_once(key, sizeof(key)) && publish)
		tcp_fastopen_reset_cipher(key, sizeof(key));
}

static void tcp_fastopen_ctx_free(struct rcu_head *head)
{
	struct tcp_fastopen_context *ctx =
	    container_of(head, struct tcp_fastopen_context, rcu);
	crypto_free_cipher(ctx->tfm);
	kfree(ctx);
}

int tcp_fastopen_reset_cipher(void *key, unsigned int len)
{
	int err;
	struct tcp_fastopen_context *ctx, *octx;

	ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;
	ctx->tfm = crypto_alloc_cipher("aes", 0, 0);

	if (IS_ERR(ctx->tfm)) {
		err = PTR_ERR(ctx->tfm);
error:		kfree(ctx);
		pr_err("TCP: TFO aes cipher alloc error: %d\n", err);
		return err;
	}
	err = crypto_cipher_setkey(ctx->tfm, key, len);
	if (err) {
		pr_err("TCP: TFO cipher key error: %d\n", err);
		crypto_free_cipher(ctx->tfm);
		goto error;
	}
	memcpy(ctx->key, key, len);

	spin_lock(&tcp_fastopen_ctx_lock);

	octx = rcu_dereference_protected(tcp_fastopen_ctx,
				lockdep_is_held(&tcp_fastopen_ctx_lock));
	rcu_assign_pointer(tcp_fastopen_ctx, ctx);
	spin_unlock(&tcp_fastopen_ctx_lock);

	if (octx)
		call_rcu(&octx->rcu, tcp_fastopen_ctx_free);
	return err;
}

/* Computes the fastopen cookie for the IP path.
 * The path is 128 bits long (pad with zeros for IPv4).
 *
 * The caller must check foc->len to determine if a valid cookie
 * has been generated successfully.
 */
void tcp_fastopen_cookie_gen(__be32 src, __be32 dst,
			     struct tcp_fastopen_cookie *foc)
{
	__be32 path[4] = { src, dst, 0, 0 };
	struct tcp_fastopen_context *ctx;

	tcp_fastopen_init_key_once(true);

	rcu_read_lock();
	ctx = rcu_dereference(tcp_fastopen_ctx);
	if (ctx) {
		crypto_cipher_encrypt_one(ctx->tfm, foc->val, (__u8 *)path);
		foc->len = TCP_FASTOPEN_COOKIE_SIZE;
	}
	rcu_read_unlock();
}
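
In v3.15 the cookie is simply the AES encryption of the 128-bit source/destination address pair under a host-wide key. From userspace, the client side of this machinery is exercised with sendto(MSG_FASTOPEN), which replaces connect()+send(): the first use requests a cookie, later uses carry data on the SYN. A minimal, hedged client sketch follows; the destination address and port are placeholders, and the fallback define covers older libc headers.

#include <netinet/in.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

#ifndef MSG_FASTOPEN
#define MSG_FASTOPEN 0x20000000		/* older libc headers may lack it */
#endif

static ssize_t tfo_send(const void *buf, size_t len)
{
	struct sockaddr_in dst = {
		.sin_family = AF_INET,
		.sin_port = htons(8080),	/* placeholder port */
	};
	ssize_t ret;
	int fd;

	dst.sin_addr.s_addr = htonl(INADDR_LOOPBACK);	/* placeholder host */
	fd = socket(AF_INET, SOCK_STREAM, 0);
	if (fd < 0)
		return -1;
	/* Data rides on the SYN when a cached cookie exists; otherwise the
	 * kernel sends a cookie request and falls back to a normal handshake.
	 */
	ret = sendto(fd, buf, len, MSG_FASTOPEN,
		     (struct sockaddr *)&dst, sizeof(dst));
	close(fd);
	return ret;
}

Note that the v3.15 default above is TFO_CLIENT_ENABLE, so the client path works out of the box, while the server side needs the corresponding sysctl bit as well.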
v6.2 (net/ipv4/tcp_fastopen.c)
// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/tcp.h>
#include <linux/rcupdate.h>
#include <net/tcp.h>

void tcp_fastopen_init_key_once(struct net *net)
{
	u8 key[TCP_FASTOPEN_KEY_LENGTH];
	struct tcp_fastopen_context *ctxt;

	rcu_read_lock();
	ctxt = rcu_dereference(net->ipv4.tcp_fastopen_ctx);
	if (ctxt) {
		rcu_read_unlock();
		return;
	}
	rcu_read_unlock();

	/* tcp_fastopen_reset_cipher publishes the new context
	 * atomically, so we allow this race to happen here.
	 *
	 * All call sites of tcp_fastopen_cookie_gen also check
	 * for a valid cookie, so this is an acceptable risk.
	 */
	get_random_bytes(key, sizeof(key));
	tcp_fastopen_reset_cipher(net, NULL, key, NULL);
}

static void tcp_fastopen_ctx_free(struct rcu_head *head)
{
	struct tcp_fastopen_context *ctx =
	    container_of(head, struct tcp_fastopen_context, rcu);

	kfree_sensitive(ctx);
}

void tcp_fastopen_destroy_cipher(struct sock *sk)
{
	struct tcp_fastopen_context *ctx;

	ctx = rcu_dereference_protected(
			inet_csk(sk)->icsk_accept_queue.fastopenq.ctx, 1);
	if (ctx)
		call_rcu(&ctx->rcu, tcp_fastopen_ctx_free);
}

void tcp_fastopen_ctx_destroy(struct net *net)
{
	struct tcp_fastopen_context *ctxt;

	ctxt = xchg((__force struct tcp_fastopen_context **)&net->ipv4.tcp_fastopen_ctx, NULL);

	if (ctxt)
		call_rcu(&ctxt->rcu, tcp_fastopen_ctx_free);
}

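/* Note: tcp_fastopen_reset_cipher() is reached from the
 * net.ipv4.tcp_fastopen_key sysctl handler (sk == NULL, per-netns context)
 * and from the TCP_FASTOPEN_KEY socket option on a listener (sk != NULL,
 * per-socket context).
 */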
int tcp_fastopen_reset_cipher(struct net *net, struct sock *sk,
			      void *primary_key, void *backup_key)
{
	struct tcp_fastopen_context *ctx, *octx;
	struct fastopen_queue *q;
	int err = 0;

	ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx) {
		err = -ENOMEM;
		goto out;
	}

	ctx->key[0].key[0] = get_unaligned_le64(primary_key);
	ctx->key[0].key[1] = get_unaligned_le64(primary_key + 8);
	if (backup_key) {
		ctx->key[1].key[0] = get_unaligned_le64(backup_key);
		ctx->key[1].key[1] = get_unaligned_le64(backup_key + 8);
		ctx->num = 2;
	} else {
		ctx->num = 1;
	}

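	/* Publish the new context with xchg(); readers under rcu_read_lock()
	 * keep using the old one until call_rcu() runs
	 * tcp_fastopen_ctx_free() after a grace period.
	 */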
	if (sk) {
		q = &inet_csk(sk)->icsk_accept_queue.fastopenq;
		octx = xchg((__force struct tcp_fastopen_context **)&q->ctx, ctx);
	} else {
		octx = xchg((__force struct tcp_fastopen_context **)&net->ipv4.tcp_fastopen_ctx, ctx);
	}

	if (octx)
		call_rcu(&octx->rcu, tcp_fastopen_ctx_free);
out:
	return err;
}

int tcp_fastopen_get_cipher(struct net *net, struct inet_connection_sock *icsk,
			    u64 *key)
{
	struct tcp_fastopen_context *ctx;
	int n_keys = 0, i;

	rcu_read_lock();
	if (icsk)
		ctx = rcu_dereference(icsk->icsk_accept_queue.fastopenq.ctx);
	else
		ctx = rcu_dereference(net->ipv4.tcp_fastopen_ctx);
	if (ctx) {
		n_keys = tcp_fastopen_context_len(ctx);
		for (i = 0; i < n_keys; i++) {
			put_unaligned_le64(ctx->key[i].key[0], key + (i * 2));
			put_unaligned_le64(ctx->key[i].key[1], key + (i * 2) + 1);
		}
	}
	rcu_read_unlock();

	return n_keys;
}

static bool __tcp_fastopen_cookie_gen_cipher(struct request_sock *req,
					     struct sk_buff *syn,
					     const siphash_key_t *key,
					     struct tcp_fastopen_cookie *foc)
{
	BUILD_BUG_ON(TCP_FASTOPEN_COOKIE_SIZE != sizeof(u64));

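	/* Hashing saddr and daddr with a single siphash() call relies on the
	 * two address fields being contiguous in struct iphdr / ipv6hdr.
	 */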
	if (req->rsk_ops->family == AF_INET) {
		const struct iphdr *iph = ip_hdr(syn);

		foc->val[0] = cpu_to_le64(siphash(&iph->saddr,
					  sizeof(iph->saddr) +
					  sizeof(iph->daddr),
					  key));
		foc->len = TCP_FASTOPEN_COOKIE_SIZE;
		return true;
	}
#if IS_ENABLED(CONFIG_IPV6)
	if (req->rsk_ops->family == AF_INET6) {
		const struct ipv6hdr *ip6h = ipv6_hdr(syn);

		foc->val[0] = cpu_to_le64(siphash(&ip6h->saddr,
					  sizeof(ip6h->saddr) +
					  sizeof(ip6h->daddr),
					  key));
		foc->len = TCP_FASTOPEN_COOKIE_SIZE;
		return true;
	}
#endif
	return false;
}

/* Generate the fastopen cookie by applying SipHash to both the source and
 * destination addresses.
 */
static void tcp_fastopen_cookie_gen(struct sock *sk,
				    struct request_sock *req,
				    struct sk_buff *syn,
				    struct tcp_fastopen_cookie *foc)
{
	struct tcp_fastopen_context *ctx;

	rcu_read_lock();
	ctx = tcp_fastopen_get_ctx(sk);
	if (ctx)
		__tcp_fastopen_cookie_gen_cipher(req, syn, &ctx->key[0], foc);
	rcu_read_unlock();
}

/* If an incoming SYN or SYNACK frame contains a payload and/or FIN,
 * queue this additional data / FIN.
 */
void tcp_fastopen_add_skb(struct sock *sk, struct sk_buff *skb)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if (TCP_SKB_CB(skb)->end_seq == tp->rcv_nxt)
		return;

	skb = skb_clone(skb, GFP_ATOMIC);
	if (!skb)
		return;

	skb_dst_drop(skb);
	/* segs_in has been initialized to 1 in tcp_create_openreq_child().
	 * Hence, reset segs_in to 0 before calling tcp_segs_in()
	 * to avoid double counting.  Also, tcp_segs_in() expects
	 * skb->len to include the tcp_hdrlen.  Hence, it should
	 * be called before __skb_pull().
	 */
	tp->segs_in = 0;
	tcp_segs_in(tp, skb);
	__skb_pull(skb, tcp_hdrlen(skb));
	sk_forced_mem_schedule(sk, skb->truesize);
	skb_set_owner_r(skb, sk);

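	/* Consume the SYN's sequence number and clear the SYN flag so the
	 * queued skb covers only the payload.
	 */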
	TCP_SKB_CB(skb)->seq++;
	TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_SYN;

	tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
	__skb_queue_tail(&sk->sk_receive_queue, skb);
	tp->syn_data_acked = 1;

	/* u64_stats_update_begin(&tp->syncp) not needed here,
	 * as we certainly are not changing upper 32bit value (0)
	 */
	tp->bytes_received = skb->len;

	if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN)
		tcp_fin(sk);
}

/* returns 0 - no key match, 1 for primary, 2 for backup */
static int tcp_fastopen_cookie_gen_check(struct sock *sk,
					 struct request_sock *req,
					 struct sk_buff *syn,
					 struct tcp_fastopen_cookie *orig,
					 struct tcp_fastopen_cookie *valid_foc)
{
	struct tcp_fastopen_cookie search_foc = { .len = -1 };
	struct tcp_fastopen_cookie *foc = valid_foc;
	struct tcp_fastopen_context *ctx;
	int i, ret = 0;

	rcu_read_lock();
	ctx = tcp_fastopen_get_ctx(sk);
	if (!ctx)
		goto out;
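	/* The first pass writes through foc == valid_foc, preserving the
	 * primary-key cookie for the SYN-ACK; later passes use the scratch
	 * search_foc.
	 */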
	for (i = 0; i < tcp_fastopen_context_len(ctx); i++) {
		__tcp_fastopen_cookie_gen_cipher(req, syn, &ctx->key[i], foc);
		if (tcp_fastopen_cookie_match(foc, orig)) {
			ret = i + 1;
			goto out;
		}
		foc = &search_foc;
	}
out:
	rcu_read_unlock();
	return ret;
}

static struct sock *tcp_fastopen_create_child(struct sock *sk,
					      struct sk_buff *skb,
					      struct request_sock *req)
{
	struct tcp_sock *tp;
	struct request_sock_queue *queue = &inet_csk(sk)->icsk_accept_queue;
	struct sock *child;
	bool own_req;

	child = inet_csk(sk)->icsk_af_ops->syn_recv_sock(sk, skb, req, NULL,
							 NULL, &own_req);
	if (!child)
		return NULL;

	spin_lock(&queue->fastopenq.lock);
	queue->fastopenq.qlen++;
	spin_unlock(&queue->fastopenq.lock);

	/* Initialize the child socket. Have to fix some values to take
	 * into account the child is a Fast Open socket and is created
	 * only out of the bits carried in the SYN packet.
	 */
	tp = tcp_sk(child);

	rcu_assign_pointer(tp->fastopen_rsk, req);
	tcp_rsk(req)->tfo_listener = true;

	/* RFC1323: The window in SYN & SYN/ACK segments is never
	 * scaled. So correct it appropriately.
	 */
	tp->snd_wnd = ntohs(tcp_hdr(skb)->window);
	tp->max_window = tp->snd_wnd;

	/* Activate the retrans timer so that SYNACK can be retransmitted.
	 * The request socket is not added to the ehash
	 * because it's been added to the accept queue directly.
	 */
	req->timeout = tcp_timeout_init(child);
	inet_csk_reset_xmit_timer(child, ICSK_TIME_RETRANS,
				  req->timeout, TCP_RTO_MAX);

	refcount_set(&req->rsk_refcnt, 2);

	/* Now finish processing the fastopen child socket. */
	tcp_init_transfer(child, BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB, skb);

	tp->rcv_nxt = TCP_SKB_CB(skb)->seq + 1;

	tcp_fastopen_add_skb(child, skb);

	tcp_rsk(req)->rcv_nxt = tp->rcv_nxt;
	tp->rcv_wup = tp->rcv_nxt;
	/* tcp_conn_request() is sending the SYNACK,
	 * and queues the child into listener accept queue.
	 */
	return child;
}

static bool tcp_fastopen_queue_check(struct sock *sk)
{
	struct fastopen_queue *fastopenq;

	/* Make sure the listener has enabled fastopen, and we don't
	 * exceed the max # of pending TFO requests allowed before trying
	 * to validate the cookie, in order to avoid burning CPU cycles
	 * unnecessarily.
	 *
	 * XXX (TFO) - The implication of checking the max_qlen before
	 * processing a cookie request is that clients can't differentiate
	 * between qlen overflow causing Fast Open to be disabled
	 * temporarily vs a server not supporting Fast Open at all.
	 */
	fastopenq = &inet_csk(sk)->icsk_accept_queue.fastopenq;
	if (fastopenq->max_qlen == 0)
		return false;

	if (fastopenq->qlen >= fastopenq->max_qlen) {
		struct request_sock *req1;

		spin_lock(&fastopenq->lock);
		req1 = fastopenq->rskq_rst_head;
		if (!req1 || time_after(req1->rsk_timer.expires, jiffies)) {
			__NET_INC_STATS(sock_net(sk),
					LINUX_MIB_TCPFASTOPENLISTENOVERFLOW);
			spin_unlock(&fastopenq->lock);
			return false;
		}
		fastopenq->rskq_rst_head = req1->dl_next;
		fastopenq->qlen--;
		spin_unlock(&fastopenq->lock);
		reqsk_put(req1);
	}
	return true;
}

static bool tcp_fastopen_no_cookie(const struct sock *sk,
				   const struct dst_entry *dst,
				   int flag)
{
	return (READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_fastopen) & flag) ||
	       tcp_sk(sk)->fastopen_no_cookie ||
	       (dst && dst_metric(dst, RTAX_FASTOPEN_NO_COOKIE));
}

/* Returns the child socket if we should perform Fast Open on the SYN,
 * otherwise NULL. The cookie (foc) may be updated and returned to the
 * client in the SYN-ACK later, e.g. for a Fast Open cookie request
 * (foc->len == 0).
 */
struct sock *tcp_try_fastopen(struct sock *sk, struct sk_buff *skb,
			      struct request_sock *req,
			      struct tcp_fastopen_cookie *foc,
			      const struct dst_entry *dst)
{
	bool syn_data = TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq + 1;
	int tcp_fastopen = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_fastopen);
	struct tcp_fastopen_cookie valid_foc = { .len = -1 };
	struct sock *child;
	int ret = 0;

	if (foc->len == 0) /* Client requests a cookie */
		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPFASTOPENCOOKIEREQD);

	if (!((tcp_fastopen & TFO_SERVER_ENABLE) &&
	      (syn_data || foc->len >= 0) &&
	      tcp_fastopen_queue_check(sk))) {
		foc->len = -1;
		return NULL;
	}

	if (tcp_fastopen_no_cookie(sk, dst, TFO_SERVER_COOKIE_NOT_REQD))
		goto fastopen;

	if (foc->len == 0) {
		/* Client requests a cookie. */
		tcp_fastopen_cookie_gen(sk, req, skb, &valid_foc);
	} else if (foc->len > 0) {
		ret = tcp_fastopen_cookie_gen_check(sk, req, skb, foc,
						    &valid_foc);
		if (!ret) {
			NET_INC_STATS(sock_net(sk),
				      LINUX_MIB_TCPFASTOPENPASSIVEFAIL);
		} else {
			/* Cookie is valid. Create a (full) child socket to
			 * accept the data in SYN before returning a SYN-ACK to
			 * ack the data. If we fail to create the socket, fall
			 * back and ack the ISN only but include the same
			 * cookie.
			 *
			 * Note: Data-less SYN with valid cookie is allowed to
			 * send data in SYN_RECV state.
			 */
fastopen:
			child = tcp_fastopen_create_child(sk, skb, req);
			if (child) {
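				/* A match on the backup key (ret == 2) echoes
				 * the primary-key cookie from valid_foc so the
				 * client re-keys to the current primary.
				 */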
				if (ret == 2) {
					valid_foc.exp = foc->exp;
					*foc = valid_foc;
					NET_INC_STATS(sock_net(sk),
						      LINUX_MIB_TCPFASTOPENPASSIVEALTKEY);
				} else {
					foc->len = -1;
				}
				NET_INC_STATS(sock_net(sk),
					      LINUX_MIB_TCPFASTOPENPASSIVE);
				return child;
			}
			NET_INC_STATS(sock_net(sk),
				      LINUX_MIB_TCPFASTOPENPASSIVEFAIL);
		}
	}
	valid_foc.exp = foc->exp;
	*foc = valid_foc;
	return NULL;
}

bool tcp_fastopen_cookie_check(struct sock *sk, u16 *mss,
			       struct tcp_fastopen_cookie *cookie)
{
	const struct dst_entry *dst;

	tcp_fastopen_cache_get(sk, mss, cookie);

	/* Firewall blackhole issue check */
	if (tcp_fastopen_active_should_disable(sk)) {
		cookie->len = -1;
		return false;
	}

	dst = __sk_dst_get(sk);

	if (tcp_fastopen_no_cookie(sk, dst, TFO_CLIENT_NO_COOKIE)) {
		cookie->len = -1;
		return true;
	}
	if (cookie->len > 0)
		return true;
	tcp_sk(sk)->fastopen_client_fail = TFO_COOKIE_UNAVAILABLE;
	return false;
}

/* This function checks if we want to defer sending SYN until the first
 * write().  We defer under the following conditions:
 * 1. fastopen_connect sockopt is set
 * 2. we have a valid cookie
 * Return value: return true if we want to defer until application writes data
 *               return false if we want to send out SYN immediately
 */
bool tcp_fastopen_defer_connect(struct sock *sk, int *err)
{
	struct tcp_fastopen_cookie cookie = { .len = 0 };
	struct tcp_sock *tp = tcp_sk(sk);
	u16 mss;

	if (tp->fastopen_connect && !tp->fastopen_req) {
		if (tcp_fastopen_cookie_check(sk, &mss, &cookie)) {
			inet_sk(sk)->defer_connect = 1;
			return true;
		}

		/* Alloc fastopen_req in order for FO option to be included
		 * in SYN
		 */
		tp->fastopen_req = kzalloc(sizeof(*tp->fastopen_req),
					   sk->sk_allocation);
		if (tp->fastopen_req)
			tp->fastopen_req->cookie = cookie;
		else
			*err = -ENOBUFS;
	}
	return false;
}
EXPORT_SYMBOL(tcp_fastopen_defer_connect);

/*
 * The following code block is to deal with middle box issues with TFO:
 * Middlebox firewall issues can potentially cause the server's data to be
 * blackholed after a successful 3WHS using TFO.
 * The proposed solution is to disable active TFO globally under the
 * following circumstances:
 *   1. client side TFO socket receives out of order FIN
 *   2. client side TFO socket receives out of order RST
 *   3. client side TFO socket has timed out three times consecutively during
 *      or after handshake
 * We disable active side TFO globally for 1hr at first. Then if it
 * happens again, we disable it for 2h, then 4h, 8h, ...
 * And we reset the timeout back to 1hr when we see a successful active
 * TFO connection with data exchanges.
 */

/* Disable active TFO and record current jiffies and
 * tfo_active_disable_times
 */
void tcp_fastopen_active_disable(struct sock *sk)
{
	struct net *net = sock_net(sk);

	if (!READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_fastopen_blackhole_timeout))
		return;

	/* Paired with READ_ONCE() in tcp_fastopen_active_should_disable() */
	WRITE_ONCE(net->ipv4.tfo_active_disable_stamp, jiffies);

	/* Paired with smp_rmb() in tcp_fastopen_active_should_disable().
	 * We want net->ipv4.tfo_active_disable_stamp to be updated first.
	 */
	smp_mb__before_atomic();
	atomic_inc(&net->ipv4.tfo_active_disable_times);

	NET_INC_STATS(net, LINUX_MIB_TCPFASTOPENBLACKHOLE);
}

/* Calculate timeout for tfo active disable
 * Return true if we are still in the active TFO disable period
 * Return false if timeout already expired and we should use active TFO
 */
bool tcp_fastopen_active_should_disable(struct sock *sk)
{
	unsigned int tfo_bh_timeout =
		READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_fastopen_blackhole_timeout);
	unsigned long timeout;
	int tfo_da_times;
	int multiplier;

	if (!tfo_bh_timeout)
		return false;

	tfo_da_times = atomic_read(&sock_net(sk)->ipv4.tfo_active_disable_times);
	if (!tfo_da_times)
		return false;

	/* Paired with smp_mb__before_atomic() in tcp_fastopen_active_disable() */
	smp_rmb();

	/* Limit timeout to max: 2^6 * initial timeout */
	multiplier = 1 << min(tfo_da_times - 1, 6);

	/* Paired with the WRITE_ONCE() in tcp_fastopen_active_disable(). */
	timeout = READ_ONCE(sock_net(sk)->ipv4.tfo_active_disable_stamp) +
		  multiplier * tfo_bh_timeout * HZ;
	if (time_before(jiffies, timeout))
		return true;

	/* Mark check bit so we can check for successful active TFO
	 * condition and reset tfo_active_disable_times
	 */
	tcp_sk(sk)->syn_fastopen_ch = 1;
	return false;
}

/* Disable active TFO if FIN is the only packet in the ofo queue
 * and no data is received.
 * Also check if we can reset tfo_active_disable_times if data is
 * received successfully on a marked active TFO socket opened on
 * a non-loopback interface.
 */
void tcp_fastopen_active_disable_ofo_check(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct dst_entry *dst;
	struct sk_buff *skb;

	if (!tp->syn_fastopen)
		return;

	if (!tp->data_segs_in) {
		skb = skb_rb_first(&tp->out_of_order_queue);
		if (skb && !skb_rb_next(skb)) {
			if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) {
				tcp_fastopen_active_disable(sk);
				return;
			}
		}
	} else if (tp->syn_fastopen_ch &&
		   atomic_read(&sock_net(sk)->ipv4.tfo_active_disable_times)) {
		dst = sk_dst_get(sk);
		if (!(dst && dst->dev && (dst->dev->flags & IFF_LOOPBACK)))
			atomic_set(&sock_net(sk)->ipv4.tfo_active_disable_times, 0);
		dst_release(dst);
	}
}

void tcp_fastopen_active_detect_blackhole(struct sock *sk, bool expired)
{
	u32 timeouts = inet_csk(sk)->icsk_retransmits;
	struct tcp_sock *tp = tcp_sk(sk);

	/* Broken middle-boxes may black-hole Fast Open connection during or
	 * even after the handshake. Be extremely conservative and pause
	 * Fast Open globally after hitting the third consecutive timeout or
	 * exceeding the configured timeout limit.
	 */
	if ((tp->syn_fastopen || tp->syn_data || tp->syn_data_acked) &&
	    (timeouts == 2 || (timeouts < 2 && expired))) {
		tcp_fastopen_active_disable(sk);
		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPFASTOPENACTIVEFAIL);
	}
}
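
For context, a hedged sketch of the userspace pairing for the v6.2 code above: the server arms TFO with the TCP_FASTOPEN socket option, whose value becomes the fastopenq.max_qlen checked in tcp_fastopen_queue_check(); the client sets TCP_FASTOPEN_CONNECT so that connect() returns without emitting a SYN and the first write() goes through tcp_fastopen_defer_connect() and carries data on the SYN. The backlog value and the function names here are placeholders, not kernel API.

#include <netinet/in.h>
#include <netinet/tcp.h>
#include <sys/socket.h>
#include <unistd.h>

#ifndef TCP_FASTOPEN_CONNECT
#define TCP_FASTOPEN_CONNECT 30		/* older libc headers may lack it */
#endif

/* Server: enable TFO on a bound listener. qlen caps pending TFO requests
 * (fastopenq.max_qlen in tcp_fastopen_queue_check() above).
 */
static int tfo_server_arm(int lfd)
{
	int qlen = 16;	/* placeholder cap on pending TFO requests */

	if (setsockopt(lfd, IPPROTO_TCP, TCP_FASTOPEN, &qlen, sizeof(qlen)))
		return -1;
	return listen(lfd, 128);
}

/* Client: defer the SYN until the first write so it can carry data,
 * the path handled by tcp_fastopen_defer_connect() above.
 */
static int tfo_client_connect(int fd, const struct sockaddr_in *dst)
{
	int one = 1;

	if (setsockopt(fd, IPPROTO_TCP, TCP_FASTOPEN_CONNECT,
		       &one, sizeof(one)))
		return -1;
	/* Returns immediately; the SYN (with data) goes out on first send(). */
	return connect(fd, (const struct sockaddr *)dst, sizeof(*dst));
}

Both knobs only take effect when net.ipv4.tcp_fastopen enables the corresponding side (TFO_CLIENT_ENABLE and TFO_SERVER_ENABLE bits read at the top of tcp_try_fastopen()).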