v6.13.7
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Pluggable TCP congestion control support and newReno
 * congestion control.
 * Based on ideas from I/O scheduler support and Web100.
 *
 * Copyright (C) 2005 Stephen Hemminger <shemminger@osdl.org>
 */

#define pr_fmt(fmt) "TCP: " fmt

#include <linux/module.h>
#include <linux/mm.h>
#include <linux/types.h>
#include <linux/list.h>
#include <linux/gfp.h>
#include <linux/jhash.h>
#include <net/tcp.h>
#include <trace/events/tcp.h>

static DEFINE_SPINLOCK(tcp_cong_list_lock);
static LIST_HEAD(tcp_cong_list);

/* Simple linear search, don't expect many entries! */
struct tcp_congestion_ops *tcp_ca_find(const char *name)
{
	struct tcp_congestion_ops *e;

	list_for_each_entry_rcu(e, &tcp_cong_list, list) {
		if (strcmp(e->name, name) == 0)
			return e;
	}

	return NULL;
}

void tcp_set_ca_state(struct sock *sk, const u8 ca_state)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	trace_tcp_cong_state_set(sk, ca_state);

	if (icsk->icsk_ca_ops->set_state)
		icsk->icsk_ca_ops->set_state(sk, ca_state);
	icsk->icsk_ca_state = ca_state;
}

/* Must be called with rcu lock held */
static struct tcp_congestion_ops *tcp_ca_find_autoload(const char *name)
{
	struct tcp_congestion_ops *ca = tcp_ca_find(name);

#ifdef CONFIG_MODULES
	if (!ca && capable(CAP_NET_ADMIN)) {
		rcu_read_unlock();
		request_module("tcp_%s", name);
		rcu_read_lock();
		ca = tcp_ca_find(name);
	}
#endif
	return ca;
}

/* Simple linear search, not much in here. */
struct tcp_congestion_ops *tcp_ca_find_key(u32 key)
{
	struct tcp_congestion_ops *e;

	list_for_each_entry_rcu(e, &tcp_cong_list, list) {
		if (e->key == key)
			return e;
	}

	return NULL;
}

int tcp_validate_congestion_control(struct tcp_congestion_ops *ca)
{
	/* all algorithms must implement these */
	if (!ca->ssthresh || !ca->undo_cwnd ||
	    !(ca->cong_avoid || ca->cong_control)) {
		pr_err("%s does not implement required ops\n", ca->name);
		return -EINVAL;
	}

	return 0;
}

/* Attach new congestion control algorithm to the list
 * of available options.
 */
int tcp_register_congestion_control(struct tcp_congestion_ops *ca)
{
	int ret;

	ret = tcp_validate_congestion_control(ca);
	if (ret)
		return ret;

	ca->key = jhash(ca->name, sizeof(ca->name), strlen(ca->name));

	spin_lock(&tcp_cong_list_lock);
	if (ca->key == TCP_CA_UNSPEC || tcp_ca_find_key(ca->key)) {
		pr_notice("%s already registered or non-unique key\n",
			  ca->name);
		ret = -EEXIST;
	} else {
		list_add_tail_rcu(&ca->list, &tcp_cong_list);
		pr_debug("%s registered\n", ca->name);
	}
	spin_unlock(&tcp_cong_list_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(tcp_register_congestion_control);
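
/* Example (not part of the original file): a minimal sketch of how a
 * module might register a congestion control algorithm with the API
 * above. The name "toy" and the tcp_toy* symbols are hypothetical;
 * the sketch reuses the exported Reno helpers so that
 * tcp_validate_congestion_control() sees the required ops (ssthresh,
 * undo_cwnd, and cong_avoid or cong_control).
 */
#include <linux/module.h>
#include <net/tcp.h>

static struct tcp_congestion_ops tcp_toy __read_mostly = {
	.name		= "toy",		/* matched by sysctl/setsockopt */
	.owner		= THIS_MODULE,		/* pins module while sockets use it */
	.ssthresh	= tcp_reno_ssthresh,
	.undo_cwnd	= tcp_reno_undo_cwnd,
	.cong_avoid	= tcp_reno_cong_avoid,
};

static int __init tcp_toy_register(void)
{
	return tcp_register_congestion_control(&tcp_toy);
}

static void __exit tcp_toy_unregister(void)
{
	/* Safe only once no socket holds a reference; see below. */
	tcp_unregister_congestion_control(&tcp_toy);
}

module_init(tcp_toy_register);
module_exit(tcp_toy_unregister);
MODULE_LICENSE("GPL");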

/*
 * Remove congestion control algorithm, called from
 * the module's remove function.  Module ref counts are used
 * to ensure that this can't be done till all sockets using
 * that method are closed.
 */
void tcp_unregister_congestion_control(struct tcp_congestion_ops *ca)
{
	spin_lock(&tcp_cong_list_lock);
	list_del_rcu(&ca->list);
	spin_unlock(&tcp_cong_list_lock);

	/* Wait for outstanding readers to complete before the
	 * module gets removed entirely.
	 *
	 * A try_module_get() should fail by now, as our module is
	 * in the "going" state: no refs are held anymore and the
	 * module_exit() handler is being called.
	 */
	synchronize_rcu();
}
EXPORT_SYMBOL_GPL(tcp_unregister_congestion_control);

/* Replace a registered old ca with a new one.
 *
 * The new ca must have the same name as the old one it replaces,
 * and that name must already be registered.
 */
int tcp_update_congestion_control(struct tcp_congestion_ops *ca, struct tcp_congestion_ops *old_ca)
{
	struct tcp_congestion_ops *existing;
	int ret = 0;

	ca->key = jhash(ca->name, sizeof(ca->name), strlen(ca->name));

	spin_lock(&tcp_cong_list_lock);
	existing = tcp_ca_find_key(old_ca->key);
	if (ca->key == TCP_CA_UNSPEC || !existing || strcmp(existing->name, ca->name)) {
		pr_notice("%s not registered or non-unique key\n",
			  ca->name);
		ret = -EINVAL;
	} else if (existing != old_ca) {
		pr_notice("invalid old congestion control algorithm to replace\n");
		ret = -EINVAL;
	} else {
		/* Add the new one before removing the old one to keep
		 * one implementation available all the time.
		 */
		list_add_tail_rcu(&ca->list, &tcp_cong_list);
		list_del_rcu(&existing->list);
		pr_debug("%s updated\n", ca->name);
	}
	spin_unlock(&tcp_cong_list_lock);

	/* Wait for outstanding readers to complete before the
	 * module or struct_ops gets removed entirely.
	 */
	if (!ret)
		synchronize_rcu();

	return ret;
}

u32 tcp_ca_get_key_by_name(const char *name, bool *ecn_ca)
{
	const struct tcp_congestion_ops *ca;
	u32 key = TCP_CA_UNSPEC;

	might_sleep();

	rcu_read_lock();
	ca = tcp_ca_find_autoload(name);
	if (ca) {
		key = ca->key;
		*ecn_ca = ca->flags & TCP_CONG_NEEDS_ECN;
	}
	rcu_read_unlock();

	return key;
}

char *tcp_ca_get_name_by_key(u32 key, char *buffer)
{
	const struct tcp_congestion_ops *ca;
	char *ret = NULL;

	rcu_read_lock();
	ca = tcp_ca_find_key(key);
	if (ca) {
		strscpy(buffer, ca->name, TCP_CA_NAME_MAX);
		ret = buffer;
	}
	rcu_read_unlock();

	return ret;
}

/* Assign choice of congestion control. */
void tcp_assign_congestion_control(struct sock *sk)
{
	struct net *net = sock_net(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);
	const struct tcp_congestion_ops *ca;

	rcu_read_lock();
	ca = rcu_dereference(net->ipv4.tcp_congestion_control);
	if (unlikely(!bpf_try_module_get(ca, ca->owner)))
		ca = &tcp_reno;
	icsk->icsk_ca_ops = ca;
	rcu_read_unlock();

	memset(icsk->icsk_ca_priv, 0, sizeof(icsk->icsk_ca_priv));
	if (ca->flags & TCP_CONG_NEEDS_ECN)
		INET_ECN_xmit(sk);
	else
		INET_ECN_dontxmit(sk);
}

void tcp_init_congestion_control(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	tcp_sk(sk)->prior_ssthresh = 0;
	if (icsk->icsk_ca_ops->init)
		icsk->icsk_ca_ops->init(sk);
	if (tcp_ca_needs_ecn(sk))
		INET_ECN_xmit(sk);
	else
		INET_ECN_dontxmit(sk);
	icsk->icsk_ca_initialized = 1;
}

static void tcp_reinit_congestion_control(struct sock *sk,
					  const struct tcp_congestion_ops *ca)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	tcp_cleanup_congestion_control(sk);
	icsk->icsk_ca_ops = ca;
	icsk->icsk_ca_setsockopt = 1;
	memset(icsk->icsk_ca_priv, 0, sizeof(icsk->icsk_ca_priv));

	if (ca->flags & TCP_CONG_NEEDS_ECN)
		INET_ECN_xmit(sk);
	else
		INET_ECN_dontxmit(sk);

	if (!((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN)))
		tcp_init_congestion_control(sk);
}

/* Manage refcounts on socket close. */
void tcp_cleanup_congestion_control(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	if (icsk->icsk_ca_initialized && icsk->icsk_ca_ops->release)
		icsk->icsk_ca_ops->release(sk);
	icsk->icsk_ca_initialized = 0;
	bpf_module_put(icsk->icsk_ca_ops, icsk->icsk_ca_ops->owner);
}

/* Used by sysctl to change default congestion control */
int tcp_set_default_congestion_control(struct net *net, const char *name)
{
	struct tcp_congestion_ops *ca;
	const struct tcp_congestion_ops *prev;
	int ret;

	rcu_read_lock();
	ca = tcp_ca_find_autoload(name);
	if (!ca) {
		ret = -ENOENT;
	} else if (!bpf_try_module_get(ca, ca->owner)) {
		ret = -EBUSY;
	} else if (!net_eq(net, &init_net) &&
			!(ca->flags & TCP_CONG_NON_RESTRICTED)) {
		/* Only init netns can set default to a restricted algorithm */
		ret = -EPERM;
	} else {
		prev = xchg(&net->ipv4.tcp_congestion_control, ca);
		if (prev)
			bpf_module_put(prev, prev->owner);

		ca->flags |= TCP_CONG_NON_RESTRICTED;
		ret = 0;
	}
	rcu_read_unlock();

	return ret;
}
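
/* Usage note (not part of the original file): at runtime this function
 * is reached through the net.ipv4.tcp_congestion_control sysctl, e.g.
 *
 *   sysctl -w net.ipv4.tcp_congestion_control=cubic
 *
 * New sockets in that netns then pick the default up via
 * tcp_assign_congestion_control() above.
 */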

/* Set default value from kernel configuration at bootup */
static int __init tcp_congestion_default(void)
{
	return tcp_set_default_congestion_control(&init_net,
						  CONFIG_DEFAULT_TCP_CONG);
}
late_initcall(tcp_congestion_default);

/* Build string with list of available congestion control values */
void tcp_get_available_congestion_control(char *buf, size_t maxlen)
{
	struct tcp_congestion_ops *ca;
	size_t offs = 0;

	rcu_read_lock();
	list_for_each_entry_rcu(ca, &tcp_cong_list, list) {
		offs += snprintf(buf + offs, maxlen - offs,
				 "%s%s",
				 offs == 0 ? "" : " ", ca->name);

		if (WARN_ON_ONCE(offs >= maxlen))
			break;
	}
	rcu_read_unlock();
}

/* Get current default congestion control */
void tcp_get_default_congestion_control(struct net *net, char *name)
{
	const struct tcp_congestion_ops *ca;

	rcu_read_lock();
	ca = rcu_dereference(net->ipv4.tcp_congestion_control);
	strscpy(name, ca->name, TCP_CA_NAME_MAX);
	rcu_read_unlock();
}

/* Build list of non-restricted congestion control values */
void tcp_get_allowed_congestion_control(char *buf, size_t maxlen)
{
	struct tcp_congestion_ops *ca;
	size_t offs = 0;

	*buf = '\0';
	rcu_read_lock();
	list_for_each_entry_rcu(ca, &tcp_cong_list, list) {
		if (!(ca->flags & TCP_CONG_NON_RESTRICTED))
			continue;
		offs += snprintf(buf + offs, maxlen - offs,
				 "%s%s",
				 offs == 0 ? "" : " ", ca->name);

		if (WARN_ON_ONCE(offs >= maxlen))
			break;
	}
	rcu_read_unlock();
}

/* Change list of non-restricted congestion control */
int tcp_set_allowed_congestion_control(char *val)
{
	struct tcp_congestion_ops *ca;
	char *saved_clone, *clone, *name;
	int ret = 0;

	saved_clone = clone = kstrdup(val, GFP_USER);
	if (!clone)
		return -ENOMEM;

	spin_lock(&tcp_cong_list_lock);
	/* pass 1: check for bad entries */
	while ((name = strsep(&clone, " ")) && *name) {
		ca = tcp_ca_find(name);
		if (!ca) {
			ret = -ENOENT;
			goto out;
		}
	}

	/* pass 2: clear old values */
	list_for_each_entry_rcu(ca, &tcp_cong_list, list)
		ca->flags &= ~TCP_CONG_NON_RESTRICTED;

	/* pass 3: mark as allowed */
	while ((name = strsep(&val, " ")) && *name) {
		ca = tcp_ca_find(name);
		WARN_ON(!ca);
		if (ca)
			ca->flags |= TCP_CONG_NON_RESTRICTED;
	}
out:
	spin_unlock(&tcp_cong_list_lock);
	kfree(saved_clone);

	return ret;
}
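
/* Usage note (not part of the original file): this is reached through
 * the net.ipv4.tcp_allowed_congestion_control sysctl, e.g.
 *
 *   sysctl -w net.ipv4.tcp_allowed_congestion_control="reno cubic"
 *
 * after which only reno and cubic may be selected by tasks without
 * CAP_NET_ADMIN in tcp_set_congestion_control() below.
 */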

/* Change congestion control for socket. If load is false, then it is the
 * responsibility of the caller to call tcp_init_congestion_control or
 * tcp_reinit_congestion_control (if the current congestion control was
 * already initialized).
 */
int tcp_set_congestion_control(struct sock *sk, const char *name, bool load,
			       bool cap_net_admin)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	const struct tcp_congestion_ops *ca;
	int err = 0;

	if (icsk->icsk_ca_dst_locked)
		return -EPERM;

	rcu_read_lock();
	if (!load)
		ca = tcp_ca_find(name);
	else
		ca = tcp_ca_find_autoload(name);

	/* No change asking for existing value */
	if (ca == icsk->icsk_ca_ops) {
		icsk->icsk_ca_setsockopt = 1;
		goto out;
	}

	if (!ca)
		err = -ENOENT;
	else if (!((ca->flags & TCP_CONG_NON_RESTRICTED) || cap_net_admin))
		err = -EPERM;
	else if (!bpf_try_module_get(ca, ca->owner))
		err = -EBUSY;
	else
		tcp_reinit_congestion_control(sk, ca);
 out:
	rcu_read_unlock();
	return err;
}
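
/* Example (not part of the original file): the userspace counterpart of
 * tcp_set_congestion_control() is the TCP_CONGESTION socket option. A
 * minimal sketch; set_cc() is a hypothetical helper and error handling
 * beyond perror() is omitted.
 */
#include <netinet/in.h>
#include <netinet/tcp.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>

static int set_cc(int fd, const char *name)
{
	/* Kernel-side this ends up in tcp_set_congestion_control(). */
	if (setsockopt(fd, IPPROTO_TCP, TCP_CONGESTION,
		       name, strlen(name)) < 0) {
		perror("setsockopt(TCP_CONGESTION)");
		return -1;
	}
	return 0;
}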

/* Slow start is used when the congestion window is no greater than the
 * slow start threshold. We base it on RFC2581 and also handle stretch
 * ACKs properly. We do not implement RFC3465 Appropriate Byte Counting
 * (ABC) per se but something better ;) a packet is only considered
 * (s)acked in its entirety to defend against the ACK attacks described
 * in the RFC. Slow start processes a stretch ACK of degree N as if N
 * acks of degree 1 are received back to back, except ABC caps N to 2.
 * Slow start exits when cwnd grows over ssthresh and returns the
 * leftover acks to adjust cwnd in congestion avoidance mode.
 */
__bpf_kfunc u32 tcp_slow_start(struct tcp_sock *tp, u32 acked)
{
	u32 cwnd = min(tcp_snd_cwnd(tp) + acked, tp->snd_ssthresh);

	acked -= cwnd - tcp_snd_cwnd(tp);
	tcp_snd_cwnd_set(tp, min(cwnd, tp->snd_cwnd_clamp));

	return acked;
}
EXPORT_SYMBOL_GPL(tcp_slow_start);
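
/* Worked example (not part of the original file): with snd_cwnd = 10,
 * snd_ssthresh = 12 and a stretch ACK covering acked = 5 packets,
 * cwnd = min(10 + 5, 12) = 12, so the window grows by 2 segments and
 * the remaining acked - (12 - 10) = 3 packets are returned for the
 * caller to spend in congestion avoidance.
 */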

/* In theory this is tp->snd_cwnd += 1 / tp->snd_cwnd (or alternative w),
 * for every packet that was ACKed.
 */
__bpf_kfunc void tcp_cong_avoid_ai(struct tcp_sock *tp, u32 w, u32 acked)
{
	/* If credits accumulated at a higher w, apply them gently now. */
	if (tp->snd_cwnd_cnt >= w) {
		tp->snd_cwnd_cnt = 0;
		tcp_snd_cwnd_set(tp, tcp_snd_cwnd(tp) + 1);
	}

	tp->snd_cwnd_cnt += acked;
	if (tp->snd_cwnd_cnt >= w) {
		u32 delta = tp->snd_cwnd_cnt / w;

		tp->snd_cwnd_cnt -= delta * w;
		tcp_snd_cwnd_set(tp, tcp_snd_cwnd(tp) + delta);
	}
	tcp_snd_cwnd_set(tp, min(tcp_snd_cwnd(tp), tp->snd_cwnd_clamp));
}
EXPORT_SYMBOL_GPL(tcp_cong_avoid_ai);
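
/* Worked example (not part of the original file): with w = 10,
 * snd_cwnd = 10, snd_cwnd_cnt = 8 and acked = 5, the credit branch does
 * not fire (8 < 10); snd_cwnd_cnt becomes 13, delta = 13 / 10 = 1, so
 * snd_cwnd grows to 11 and snd_cwnd_cnt keeps the remainder 3. In other
 * words, roughly one full window of ACKed packets buys one extra
 * segment, i.e. additive increase of about one segment per RTT.
 */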

/*
 * TCP Reno congestion control
 * This is a special case used for fallback as well.
 */
/* This is Jacobson's slow start and congestion avoidance.
 * SIGCOMM '88, p. 328.
 */
__bpf_kfunc void tcp_reno_cong_avoid(struct sock *sk, u32 ack, u32 acked)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if (!tcp_is_cwnd_limited(sk))
		return;

	/* In "safe" area, increase. */
	if (tcp_in_slow_start(tp)) {
		acked = tcp_slow_start(tp, acked);
		if (!acked)
			return;
	}
	/* In dangerous area, increase slowly. */
	tcp_cong_avoid_ai(tp, tcp_snd_cwnd(tp), acked);
}
EXPORT_SYMBOL_GPL(tcp_reno_cong_avoid);

/* Slow start threshold is half the congestion window (min 2) */
__bpf_kfunc u32 tcp_reno_ssthresh(struct sock *sk)
{
	const struct tcp_sock *tp = tcp_sk(sk);

	return max(tcp_snd_cwnd(tp) >> 1U, 2U);
}
EXPORT_SYMBOL_GPL(tcp_reno_ssthresh);

__bpf_kfunc u32 tcp_reno_undo_cwnd(struct sock *sk)
{
	const struct tcp_sock *tp = tcp_sk(sk);

	return max(tcp_snd_cwnd(tp), tp->prior_cwnd);
}
EXPORT_SYMBOL_GPL(tcp_reno_undo_cwnd);

struct tcp_congestion_ops tcp_reno = {
	.flags		= TCP_CONG_NON_RESTRICTED,
	.name		= "reno",
	.owner		= THIS_MODULE,
	.ssthresh	= tcp_reno_ssthresh,
	.cong_avoid	= tcp_reno_cong_avoid,
	.undo_cwnd	= tcp_reno_undo_cwnd,
};
v3.5.6
 
/*
 * Pluggable TCP congestion control support and newReno
 * congestion control.
 * Based on ideas from I/O scheduler support and Web100.
 *
 * Copyright (C) 2005 Stephen Hemminger <shemminger@osdl.org>
 */

#define pr_fmt(fmt) "TCP: " fmt

#include <linux/module.h>
#include <linux/mm.h>
#include <linux/types.h>
#include <linux/list.h>
#include <linux/gfp.h>
#include <net/tcp.h>

int sysctl_tcp_max_ssthresh = 0;

static DEFINE_SPINLOCK(tcp_cong_list_lock);
static LIST_HEAD(tcp_cong_list);

/* Simple linear search, don't expect many entries! */
static struct tcp_congestion_ops *tcp_ca_find(const char *name)
{
	struct tcp_congestion_ops *e;

	list_for_each_entry_rcu(e, &tcp_cong_list, list) {
		if (strcmp(e->name, name) == 0)
			return e;
	}

	return NULL;
}

/*
 * Attach new congestion control algorithm to the list
 * of available options.
 */
int tcp_register_congestion_control(struct tcp_congestion_ops *ca)
{
	int ret = 0;

	/* all algorithms must implement ssthresh and cong_avoid ops */
	if (!ca->ssthresh || !ca->cong_avoid) {
		pr_err("%s does not implement required ops\n", ca->name);
		return -EINVAL;
	}

	spin_lock(&tcp_cong_list_lock);
	if (tcp_ca_find(ca->name)) {
		pr_notice("%s already registered\n", ca->name);
		ret = -EEXIST;
	} else {
		list_add_tail_rcu(&ca->list, &tcp_cong_list);
		pr_info("%s registered\n", ca->name);
	}
	spin_unlock(&tcp_cong_list_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(tcp_register_congestion_control);

/*
 * Remove congestion control algorithm, called from
 * the module's remove function.  Module ref counts are used
 * to ensure that this can't be done till all sockets using
 * that method are closed.
 */
void tcp_unregister_congestion_control(struct tcp_congestion_ops *ca)
{
	spin_lock(&tcp_cong_list_lock);
	list_del_rcu(&ca->list);
	spin_unlock(&tcp_cong_list_lock);
}
EXPORT_SYMBOL_GPL(tcp_unregister_congestion_control);

/* Assign choice of congestion control. */
void tcp_init_congestion_control(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct tcp_congestion_ops *ca;

	/* if no choice made yet assign the current value set as default */
	if (icsk->icsk_ca_ops == &tcp_init_congestion_ops) {
		rcu_read_lock();
		list_for_each_entry_rcu(ca, &tcp_cong_list, list) {
			if (try_module_get(ca->owner)) {
				icsk->icsk_ca_ops = ca;
				break;
			}

			/* fallback to next available */
		}
		rcu_read_unlock();
	}

	if (icsk->icsk_ca_ops->init)
		icsk->icsk_ca_ops->init(sk);
}

/* Manage refcounts on socket close. */
void tcp_cleanup_congestion_control(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	if (icsk->icsk_ca_ops->release)
		icsk->icsk_ca_ops->release(sk);
	module_put(icsk->icsk_ca_ops->owner);
}

/* Used by sysctl to change default congestion control */
int tcp_set_default_congestion_control(const char *name)
{
	struct tcp_congestion_ops *ca;
	int ret = -ENOENT;

	spin_lock(&tcp_cong_list_lock);
	ca = tcp_ca_find(name);
#ifdef CONFIG_MODULES
	if (!ca && capable(CAP_NET_ADMIN)) {
		spin_unlock(&tcp_cong_list_lock);

		request_module("tcp_%s", name);
		spin_lock(&tcp_cong_list_lock);
		ca = tcp_ca_find(name);
	}
#endif

	if (ca) {
		ca->flags |= TCP_CONG_NON_RESTRICTED;	/* default is always allowed */
		list_move(&ca->list, &tcp_cong_list);
		ret = 0;
	}
	spin_unlock(&tcp_cong_list_lock);

	return ret;
}

/* Set default value from kernel configuration at bootup */
static int __init tcp_congestion_default(void)
{
	return tcp_set_default_congestion_control(CONFIG_DEFAULT_TCP_CONG);
}
late_initcall(tcp_congestion_default);


/* Build string with list of available congestion control values */
void tcp_get_available_congestion_control(char *buf, size_t maxlen)
{
	struct tcp_congestion_ops *ca;
	size_t offs = 0;

	rcu_read_lock();
	list_for_each_entry_rcu(ca, &tcp_cong_list, list) {
		offs += snprintf(buf + offs, maxlen - offs,
				 "%s%s",
				 offs == 0 ? "" : " ", ca->name);
	}
	rcu_read_unlock();
}

/* Get current default congestion control */
void tcp_get_default_congestion_control(char *name)
{
	struct tcp_congestion_ops *ca;
	/* We will always have reno... */
	BUG_ON(list_empty(&tcp_cong_list));

	rcu_read_lock();
	ca = list_entry(tcp_cong_list.next, struct tcp_congestion_ops, list);
	strncpy(name, ca->name, TCP_CA_NAME_MAX);
	rcu_read_unlock();
}

/* Build list of non-restricted congestion control values */
void tcp_get_allowed_congestion_control(char *buf, size_t maxlen)
{
	struct tcp_congestion_ops *ca;
	size_t offs = 0;

	*buf = '\0';
	rcu_read_lock();
	list_for_each_entry_rcu(ca, &tcp_cong_list, list) {
		if (!(ca->flags & TCP_CONG_NON_RESTRICTED))
			continue;
		offs += snprintf(buf + offs, maxlen - offs,
				 "%s%s",
				 offs == 0 ? "" : " ", ca->name);
	}
	rcu_read_unlock();
}

/* Change list of non-restricted congestion control */
int tcp_set_allowed_congestion_control(char *val)
{
	struct tcp_congestion_ops *ca;
	char *saved_clone, *clone, *name;
	int ret = 0;

	saved_clone = clone = kstrdup(val, GFP_USER);
	if (!clone)
		return -ENOMEM;

	spin_lock(&tcp_cong_list_lock);
	/* pass 1: check for bad entries */
	while ((name = strsep(&clone, " ")) && *name) {
		ca = tcp_ca_find(name);
		if (!ca) {
			ret = -ENOENT;
			goto out;
		}
	}

	/* pass 2: clear old values */
	list_for_each_entry_rcu(ca, &tcp_cong_list, list)
		ca->flags &= ~TCP_CONG_NON_RESTRICTED;

	/* pass 3: mark as allowed */
	while ((name = strsep(&val, " ")) && *name) {
		ca = tcp_ca_find(name);
		WARN_ON(!ca);
		if (ca)
			ca->flags |= TCP_CONG_NON_RESTRICTED;
	}
out:
	spin_unlock(&tcp_cong_list_lock);
	kfree(saved_clone);

	return ret;
}

/* Change congestion control for socket */
int tcp_set_congestion_control(struct sock *sk, const char *name)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct tcp_congestion_ops *ca;
	int err = 0;

	rcu_read_lock();
	ca = tcp_ca_find(name);

	/* no change asking for existing value */
	if (ca == icsk->icsk_ca_ops)
		goto out;

#ifdef CONFIG_MODULES
	/* not found attempt to autoload module */
	if (!ca && capable(CAP_NET_ADMIN)) {
		rcu_read_unlock();
		request_module("tcp_%s", name);
		rcu_read_lock();
		ca = tcp_ca_find(name);
	}
#endif
	if (!ca)
		err = -ENOENT;

	else if (!((ca->flags & TCP_CONG_NON_RESTRICTED) || capable(CAP_NET_ADMIN)))
		err = -EPERM;

	else if (!try_module_get(ca->owner))
		err = -EBUSY;

	else {
		tcp_cleanup_congestion_control(sk);
		icsk->icsk_ca_ops = ca;

		if (sk->sk_state != TCP_CLOSE && icsk->icsk_ca_ops->init)
			icsk->icsk_ca_ops->init(sk);
	}
 out:
	rcu_read_unlock();
	return err;
}

/* RFC2861 Check whether we are limited by application or congestion window
 * This is the inverse of cwnd check in tcp_tso_should_defer
 */
bool tcp_is_cwnd_limited(const struct sock *sk, u32 in_flight)
{
	const struct tcp_sock *tp = tcp_sk(sk);
	u32 left;

	if (in_flight >= tp->snd_cwnd)
		return true;

	left = tp->snd_cwnd - in_flight;
	if (sk_can_gso(sk) &&
	    left * sysctl_tcp_tso_win_divisor < tp->snd_cwnd &&
	    left * tp->mss_cache < sk->sk_gso_max_size &&
	    left < sk->sk_gso_max_segs)
		return true;
	return left <= tcp_max_tso_deferred_mss(tp);
}
EXPORT_SYMBOL_GPL(tcp_is_cwnd_limited);

/*
 * Slow start is used when congestion window is less than slow start
 * threshold. This version implements the basic RFC2581 version
 * and optionally supports:
 * 	RFC3742 Limited Slow Start  	  - growth limited to max_ssthresh
 *	RFC3465 Appropriate Byte Counting - growth limited by bytes acknowledged
 */
void tcp_slow_start(struct tcp_sock *tp)
{
	int cnt; /* increase in packets */

	/* RFC3465: ABC Slow start
	 * Increase only after a full MSS of bytes is acked
	 *
	 * TCP sender SHOULD increase cwnd by the number of
	 * previously unacknowledged bytes ACKed by each incoming
	 * acknowledgment, provided the increase is not more than L
	 */
	if (sysctl_tcp_abc && tp->bytes_acked < tp->mss_cache)
		return;

	if (sysctl_tcp_max_ssthresh > 0 && tp->snd_cwnd > sysctl_tcp_max_ssthresh)
		cnt = sysctl_tcp_max_ssthresh >> 1;	/* limited slow start */
	else
		cnt = tp->snd_cwnd;			/* exponential increase */

	/* RFC3465: ABC
	 * We MAY increase by 2 if discovered delayed ack
	 */
	if (sysctl_tcp_abc > 1 && tp->bytes_acked >= 2*tp->mss_cache)
		cnt <<= 1;
	tp->bytes_acked = 0;

	tp->snd_cwnd_cnt += cnt;
	while (tp->snd_cwnd_cnt >= tp->snd_cwnd) {
		tp->snd_cwnd_cnt -= tp->snd_cwnd;
		if (tp->snd_cwnd < tp->snd_cwnd_clamp)
			tp->snd_cwnd++;
	}
}
EXPORT_SYMBOL_GPL(tcp_slow_start);
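
/* Worked example (not part of the original file): with
 * sysctl_tcp_max_ssthresh = 100 and snd_cwnd = 150, cnt is capped at
 * 100 >> 1 = 50 per ACK instead of 150, so over a full window of ACKs
 * cwnd grows by roughly max_ssthresh / 2 = 50 segments per RTT
 * (RFC3742 limited slow start) rather than doubling.
 */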

/* In theory this is tp->snd_cwnd += 1 / tp->snd_cwnd (or alternative w) */
void tcp_cong_avoid_ai(struct tcp_sock *tp, u32 w)
{
	if (tp->snd_cwnd_cnt >= w) {
		if (tp->snd_cwnd < tp->snd_cwnd_clamp)
			tp->snd_cwnd++;
		tp->snd_cwnd_cnt = 0;
	} else {
		tp->snd_cwnd_cnt++;
	}
}
EXPORT_SYMBOL_GPL(tcp_cong_avoid_ai);

/*
 * TCP Reno congestion control
 * This is a special case used for fallback as well.
 */
/* This is Jacobson's slow start and congestion avoidance.
 * SIGCOMM '88, p. 328.
 */
void tcp_reno_cong_avoid(struct sock *sk, u32 ack, u32 in_flight)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if (!tcp_is_cwnd_limited(sk, in_flight))
		return;

	/* In "safe" area, increase. */
	if (tp->snd_cwnd <= tp->snd_ssthresh)
		tcp_slow_start(tp);

	/* In dangerous area, increase slowly. */
	else if (sysctl_tcp_abc) {
		/* RFC3465: Appropriate Byte Count
		 * increase once for each full cwnd acked
		 */
		if (tp->bytes_acked >= tp->snd_cwnd*tp->mss_cache) {
			tp->bytes_acked -= tp->snd_cwnd*tp->mss_cache;
			if (tp->snd_cwnd < tp->snd_cwnd_clamp)
				tp->snd_cwnd++;
		}
	} else {
		tcp_cong_avoid_ai(tp, tp->snd_cwnd);
	}
}
EXPORT_SYMBOL_GPL(tcp_reno_cong_avoid);

/* Slow start threshold is half the congestion window (min 2) */
u32 tcp_reno_ssthresh(struct sock *sk)
{
	const struct tcp_sock *tp = tcp_sk(sk);
	return max(tp->snd_cwnd >> 1U, 2U);
}
EXPORT_SYMBOL_GPL(tcp_reno_ssthresh);

/* Lower bound on congestion window with halving. */
u32 tcp_reno_min_cwnd(const struct sock *sk)
{
	const struct tcp_sock *tp = tcp_sk(sk);
	return tp->snd_ssthresh/2;
}
EXPORT_SYMBOL_GPL(tcp_reno_min_cwnd);

struct tcp_congestion_ops tcp_reno = {
	.flags		= TCP_CONG_NON_RESTRICTED,
	.name		= "reno",
	.owner		= THIS_MODULE,
	.ssthresh	= tcp_reno_ssthresh,
	.cong_avoid	= tcp_reno_cong_avoid,
	.min_cwnd	= tcp_reno_min_cwnd,
};

/* Initial congestion control used (until SYN).
 * Really Reno under another name, so we can tell the difference
 * during tcp_set_default_congestion_control().
 */
struct tcp_congestion_ops tcp_init_congestion_ops = {
	.name		= "",
	.owner		= THIS_MODULE,
	.ssthresh	= tcp_reno_ssthresh,
	.cong_avoid	= tcp_reno_cong_avoid,
	.min_cwnd	= tcp_reno_min_cwnd,
};
EXPORT_SYMBOL_GPL(tcp_init_congestion_ops);