net/ipv4/tcp_cong.c (v4.17)
/*
 * Pluggable TCP congestion control support and newReno
 * congestion control.
 * Based on ideas from I/O scheduler support and Web100.
 *
 * Copyright (C) 2005 Stephen Hemminger <shemminger@osdl.org>
 */

#define pr_fmt(fmt) "TCP: " fmt

#include <linux/module.h>
#include <linux/mm.h>
#include <linux/types.h>
#include <linux/list.h>
#include <linux/gfp.h>
#include <linux/jhash.h>
#include <net/tcp.h>

static DEFINE_SPINLOCK(tcp_cong_list_lock);
static LIST_HEAD(tcp_cong_list);

/* Simple linear search, don't expect many entries! */
static struct tcp_congestion_ops *tcp_ca_find(const char *name)
{
	struct tcp_congestion_ops *e;

	list_for_each_entry_rcu(e, &tcp_cong_list, list) {
		if (strcmp(e->name, name) == 0)
			return e;
	}

	return NULL;
}

/* Must be called with rcu lock held */
static struct tcp_congestion_ops *tcp_ca_find_autoload(struct net *net,
						       const char *name)
{
	struct tcp_congestion_ops *ca = tcp_ca_find(name);

#ifdef CONFIG_MODULES
	if (!ca && capable(CAP_NET_ADMIN)) {
		rcu_read_unlock();
		request_module("tcp_%s", name);
		rcu_read_lock();
		ca = tcp_ca_find(name);
	}
#endif
	return ca;
}

/* Simple linear search, not much in here. */
struct tcp_congestion_ops *tcp_ca_find_key(u32 key)
{
	struct tcp_congestion_ops *e;

	list_for_each_entry_rcu(e, &tcp_cong_list, list) {
		if (e->key == key)
			return e;
	}

	return NULL;
}

/*
 * Attach new congestion control algorithm to the list
 * of available options.
 */
int tcp_register_congestion_control(struct tcp_congestion_ops *ca)
{
	int ret = 0;

	/* all algorithms must implement these */
	if (!ca->ssthresh || !ca->undo_cwnd ||
	    !(ca->cong_avoid || ca->cong_control)) {
		pr_err("%s does not implement required ops\n", ca->name);
		return -EINVAL;
	}

	ca->key = jhash(ca->name, sizeof(ca->name), strlen(ca->name));

	spin_lock(&tcp_cong_list_lock);
	if (ca->key == TCP_CA_UNSPEC || tcp_ca_find_key(ca->key)) {
		pr_notice("%s already registered or non-unique key\n",
			  ca->name);
		ret = -EEXIST;
	} else {
		list_add_tail_rcu(&ca->list, &tcp_cong_list);
		pr_debug("%s registered\n", ca->name);
	}
	spin_unlock(&tcp_cong_list_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(tcp_register_congestion_control);
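
/*
 * Editorial sketch, not part of this file: the smallest module that
 * passes the required-ops check in tcp_register_congestion_control()
 * above.  The "example" name and the module itself are hypothetical;
 * the reno helpers are borrowed since this file exports them.  A module
 * that omits .ssthresh, .undo_cwnd, or both of .cong_avoid/.cong_control
 * would be rejected with -EINVAL.
 */
#include <linux/module.h>
#include <net/tcp.h>

static struct tcp_congestion_ops tcp_example __read_mostly = {
	.name		= "example",
	.owner		= THIS_MODULE,
	.ssthresh	= tcp_reno_ssthresh,	/* required */
	.undo_cwnd	= tcp_reno_undo_cwnd,	/* required */
	.cong_avoid	= tcp_reno_cong_avoid,	/* this or .cong_control */
};

static int __init tcp_example_register(void)
{
	/* -EEXIST here means a duplicate name (key) is already listed */
	return tcp_register_congestion_control(&tcp_example);
}

static void __exit tcp_example_unregister(void)
{
	/* module refcounting keeps this from racing with live sockets */
	tcp_unregister_congestion_control(&tcp_example);
}

module_init(tcp_example_register);
module_exit(tcp_example_unregister);
MODULE_LICENSE("GPL");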

/*
 * Remove congestion control algorithm, called from
 * the module's remove function.  Module ref counts are used
 * to ensure that this can't be done till all sockets using
 * that method are closed.
 */
void tcp_unregister_congestion_control(struct tcp_congestion_ops *ca)
{
	spin_lock(&tcp_cong_list_lock);
	list_del_rcu(&ca->list);
	spin_unlock(&tcp_cong_list_lock);

	/* Wait for outstanding readers to complete before the
	 * module gets removed entirely.
	 *
	 * A try_module_get() should fail by now as our module is
	 * in "going" state since no refs are held anymore and
	 * module_exit() handler being called.
	 */
	synchronize_rcu();
}
EXPORT_SYMBOL_GPL(tcp_unregister_congestion_control);

u32 tcp_ca_get_key_by_name(struct net *net, const char *name, bool *ecn_ca)
{
	const struct tcp_congestion_ops *ca;
	u32 key = TCP_CA_UNSPEC;

	might_sleep();

	rcu_read_lock();
	ca = tcp_ca_find_autoload(net, name);
	if (ca) {
		key = ca->key;
		*ecn_ca = ca->flags & TCP_CONG_NEEDS_ECN;
	}
	rcu_read_unlock();

	return key;
}
EXPORT_SYMBOL_GPL(tcp_ca_get_key_by_name);

char *tcp_ca_get_name_by_key(u32 key, char *buffer)
{
	const struct tcp_congestion_ops *ca;
	char *ret = NULL;

	rcu_read_lock();
	ca = tcp_ca_find_key(key);
	if (ca)
		ret = strncpy(buffer, ca->name,
			      TCP_CA_NAME_MAX);
	rcu_read_unlock();

	return ret;
}
EXPORT_SYMBOL_GPL(tcp_ca_get_name_by_key);

/* Assign choice of congestion control. */
void tcp_assign_congestion_control(struct sock *sk)
{
	struct net *net = sock_net(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);
	const struct tcp_congestion_ops *ca;

	rcu_read_lock();
	ca = rcu_dereference(net->ipv4.tcp_congestion_control);
	if (unlikely(!try_module_get(ca->owner)))
		ca = &tcp_reno;
	icsk->icsk_ca_ops = ca;
	rcu_read_unlock();

	memset(icsk->icsk_ca_priv, 0, sizeof(icsk->icsk_ca_priv));
	if (ca->flags & TCP_CONG_NEEDS_ECN)
		INET_ECN_xmit(sk);
	else
		INET_ECN_dontxmit(sk);
}

void tcp_init_congestion_control(struct sock *sk)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);

	tcp_sk(sk)->prior_ssthresh = 0;
	if (icsk->icsk_ca_ops->init)
		icsk->icsk_ca_ops->init(sk);
	if (tcp_ca_needs_ecn(sk))
		INET_ECN_xmit(sk);
	else
		INET_ECN_dontxmit(sk);
}

static void tcp_reinit_congestion_control(struct sock *sk,
					  const struct tcp_congestion_ops *ca)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	tcp_cleanup_congestion_control(sk);
	icsk->icsk_ca_ops = ca;
	icsk->icsk_ca_setsockopt = 1;
	memset(icsk->icsk_ca_priv, 0, sizeof(icsk->icsk_ca_priv));

	if (sk->sk_state != TCP_CLOSE)
		tcp_init_congestion_control(sk);
}

/* Manage refcounts on socket close. */
void tcp_cleanup_congestion_control(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	if (icsk->icsk_ca_ops->release)
		icsk->icsk_ca_ops->release(sk);
	module_put(icsk->icsk_ca_ops->owner);
}

/* Used by sysctl to change default congestion control */
int tcp_set_default_congestion_control(struct net *net, const char *name)
{
	struct tcp_congestion_ops *ca;
	const struct tcp_congestion_ops *prev;
	int ret;

	rcu_read_lock();
	ca = tcp_ca_find_autoload(net, name);
	if (!ca) {
		ret = -ENOENT;
	} else if (!try_module_get(ca->owner)) {
		ret = -EBUSY;
	} else {
		prev = xchg(&net->ipv4.tcp_congestion_control, ca);
		if (prev)
			module_put(prev->owner);

		ca->flags |= TCP_CONG_NON_RESTRICTED;
		ret = 0;
	}
	rcu_read_unlock();

	return ret;
}
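
/*
 * Editorial sketch, not part of this file: from userspace the
 * per-namespace default is changed through the tcp_congestion_control
 * sysctl, which lands in tcp_set_default_congestion_control() above.
 * Userspace C, error handling trimmed; the same pattern works for
 * net.ipv4.tcp_allowed_congestion_control.
 */
#include <stdio.h>

static int set_default_cc(const char *name)
{
	FILE *f = fopen("/proc/sys/net/ipv4/tcp_congestion_control", "w");

	if (!f)
		return -1;
	fprintf(f, "%s\n", name);	/* e.g. "cubic" or "reno" */
	return fclose(f);
}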

/* Set default value from kernel configuration at bootup */
static int __init tcp_congestion_default(void)
{
	return tcp_set_default_congestion_control(&init_net,
						  CONFIG_DEFAULT_TCP_CONG);
}
late_initcall(tcp_congestion_default);

/* Build string with list of available congestion control values */
void tcp_get_available_congestion_control(char *buf, size_t maxlen)
{
	struct tcp_congestion_ops *ca;
	size_t offs = 0;

	rcu_read_lock();
	list_for_each_entry_rcu(ca, &tcp_cong_list, list) {
		offs += snprintf(buf + offs, maxlen - offs,
				 "%s%s",
				 offs == 0 ? "" : " ", ca->name);
	}
	rcu_read_unlock();
}

/* Get current default congestion control */
void tcp_get_default_congestion_control(struct net *net, char *name)
{
	const struct tcp_congestion_ops *ca;

	rcu_read_lock();
	ca = rcu_dereference(net->ipv4.tcp_congestion_control);
	strncpy(name, ca->name, TCP_CA_NAME_MAX);
	rcu_read_unlock();
}

/* Build list of non-restricted congestion control values */
void tcp_get_allowed_congestion_control(char *buf, size_t maxlen)
{
	struct tcp_congestion_ops *ca;
	size_t offs = 0;

	*buf = '\0';
	rcu_read_lock();
	list_for_each_entry_rcu(ca, &tcp_cong_list, list) {
		if (!(ca->flags & TCP_CONG_NON_RESTRICTED))
			continue;
		offs += snprintf(buf + offs, maxlen - offs,
				 "%s%s",
				 offs == 0 ? "" : " ", ca->name);
	}
	rcu_read_unlock();
}

/* Change list of non-restricted congestion control */
int tcp_set_allowed_congestion_control(char *val)
{
	struct tcp_congestion_ops *ca;
	char *saved_clone, *clone, *name;
	int ret = 0;

	saved_clone = clone = kstrdup(val, GFP_USER);
	if (!clone)
		return -ENOMEM;

	spin_lock(&tcp_cong_list_lock);
	/* pass 1 check for bad entries */
	while ((name = strsep(&clone, " ")) && *name) {
		ca = tcp_ca_find(name);
		if (!ca) {
			ret = -ENOENT;
			goto out;
		}
	}

	/* pass 2 clear old values */
	list_for_each_entry_rcu(ca, &tcp_cong_list, list)
		ca->flags &= ~TCP_CONG_NON_RESTRICTED;

	/* pass 3 mark as allowed */
	while ((name = strsep(&val, " ")) && *name) {
		ca = tcp_ca_find(name);
		WARN_ON(!ca);
		if (ca)
			ca->flags |= TCP_CONG_NON_RESTRICTED;
	}
out:
	spin_unlock(&tcp_cong_list_lock);
	kfree(saved_clone);

	return ret;
}

/* Change congestion control for socket. If load is false, then it is the
 * responsibility of the caller to call tcp_init_congestion_control or
 * tcp_reinit_congestion_control (if the current congestion control was
 * already initialized).
 */
int tcp_set_congestion_control(struct sock *sk, const char *name, bool load, bool reinit)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	const struct tcp_congestion_ops *ca;
	int err = 0;

	if (icsk->icsk_ca_dst_locked)
		return -EPERM;

	rcu_read_lock();
	if (!load)
		ca = tcp_ca_find(name);
	else
		ca = tcp_ca_find_autoload(sock_net(sk), name);

	/* No change asking for existing value */
	if (ca == icsk->icsk_ca_ops) {
		icsk->icsk_ca_setsockopt = 1;
		goto out;
	}

	if (!ca) {
		err = -ENOENT;
	} else if (!load) {
		const struct tcp_congestion_ops *old_ca = icsk->icsk_ca_ops;

		if (try_module_get(ca->owner)) {
			if (reinit) {
				tcp_reinit_congestion_control(sk, ca);
			} else {
				icsk->icsk_ca_ops = ca;
				module_put(old_ca->owner);
			}
		} else {
			err = -EBUSY;
		}
	} else if (!((ca->flags & TCP_CONG_NON_RESTRICTED) ||
		     ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))) {
		err = -EPERM;
	} else if (!try_module_get(ca->owner)) {
		err = -EBUSY;
	} else {
		tcp_reinit_congestion_control(sk, ca);
	}
 out:
	rcu_read_unlock();
	return err;
}
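
/*
 * Editorial sketch, not part of this file: tcp_set_congestion_control()
 * is reached from userspace via the TCP_CONGESTION socket option.
 * Selecting a restricted algorithm without CAP_NET_ADMIN fails with
 * EPERM, matching the checks above.  Userspace C:
 */
#include <string.h>
#include <netinet/in.h>
#include <netinet/tcp.h>
#include <sys/socket.h>

static int set_cc(int fd, const char *name)
{
	/* kernel side ends up in tcp_set_congestion_control() */
	return setsockopt(fd, IPPROTO_TCP, TCP_CONGESTION,
			  name, strlen(name));
}

static int get_cc(int fd, char *name, socklen_t len)
{
	/* reads back the algorithm currently attached to the socket */
	return getsockopt(fd, IPPROTO_TCP, TCP_CONGESTION, name, &len);
}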

/* Slow start is used when congestion window is no greater than the slow start
 * threshold. We base on RFC2581 and also handle stretch ACKs properly.
 * We do not implement RFC3465 Appropriate Byte Counting (ABC) per se but
 * something better;) a packet is only considered (s)acked in its entirety to
 * defend the ACK attacks described in the RFC. Slow start processes a stretch
 * ACK of degree N as if N acks of degree 1 are received back to back except
 * ABC caps N to 2. Slow start exits when cwnd grows over ssthresh and
 * returns the leftover acks to adjust cwnd in congestion avoidance mode.
 */
u32 tcp_slow_start(struct tcp_sock *tp, u32 acked)
{
	u32 cwnd = min(tp->snd_cwnd + acked, tp->snd_ssthresh);

	acked -= cwnd - tp->snd_cwnd;
	tp->snd_cwnd = min(cwnd, tp->snd_cwnd_clamp);

	return acked;
}
EXPORT_SYMBOL_GPL(tcp_slow_start);
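
/*
 * Editorial worked example for tcp_slow_start(): with snd_cwnd = 8,
 * snd_ssthresh = 10 and a stretch ACK covering 5 packets, cwnd is
 * capped at min(8 + 5, 10) = 10, so 2 of the 5 acks are consumed here
 * and 3 are returned for congestion avoidance to process.
 */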

/* In theory this is tp->snd_cwnd += 1 / tp->snd_cwnd (or alternative w),
 * for every packet that was ACKed.
 */
void tcp_cong_avoid_ai(struct tcp_sock *tp, u32 w, u32 acked)
{
	/* If credits accumulated at a higher w, apply them gently now. */
	if (tp->snd_cwnd_cnt >= w) {
		tp->snd_cwnd_cnt = 0;
		tp->snd_cwnd++;
	}

	tp->snd_cwnd_cnt += acked;
	if (tp->snd_cwnd_cnt >= w) {
		u32 delta = tp->snd_cwnd_cnt / w;

		tp->snd_cwnd_cnt -= delta * w;
		tp->snd_cwnd += delta;
	}
	tp->snd_cwnd = min(tp->snd_cwnd, tp->snd_cwnd_clamp);
}
EXPORT_SYMBOL_GPL(tcp_cong_avoid_ai);
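
/*
 * Editorial worked example for tcp_cong_avoid_ai(): with w = 10,
 * snd_cwnd_cnt = 7 and acked = 5, the counter reaches 12, so
 * delta = 12 / 10 = 1, snd_cwnd grows by 1 and snd_cwnd_cnt keeps the
 * remainder 2.  That is roughly one cwnd increment per window of acks,
 * i.e. additive increase.
 */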

/*
 * TCP Reno congestion control
 * This is special case used for fallback as well.
 */
/* This is Jacobson's slow start and congestion avoidance.
 * SIGCOMM '88, p. 328.
 */
void tcp_reno_cong_avoid(struct sock *sk, u32 ack, u32 acked)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if (!tcp_is_cwnd_limited(sk))
		return;

	/* In "safe" area, increase. */
	if (tcp_in_slow_start(tp)) {
		acked = tcp_slow_start(tp, acked);
		if (!acked)
			return;
	}
	/* In dangerous area, increase slowly. */
	tcp_cong_avoid_ai(tp, tp->snd_cwnd, acked);
}
EXPORT_SYMBOL_GPL(tcp_reno_cong_avoid);

/* Slow start threshold is half the congestion window (min 2) */
u32 tcp_reno_ssthresh(struct sock *sk)
{
	const struct tcp_sock *tp = tcp_sk(sk);

	return max(tp->snd_cwnd >> 1U, 2U);
}
EXPORT_SYMBOL_GPL(tcp_reno_ssthresh);

u32 tcp_reno_undo_cwnd(struct sock *sk)
{
	const struct tcp_sock *tp = tcp_sk(sk);

	return max(tp->snd_cwnd, tp->prior_cwnd);
}
EXPORT_SYMBOL_GPL(tcp_reno_undo_cwnd);

struct tcp_congestion_ops tcp_reno = {
	.flags		= TCP_CONG_NON_RESTRICTED,
	.name		= "reno",
	.owner		= THIS_MODULE,
	.ssthresh	= tcp_reno_ssthresh,
	.cong_avoid	= tcp_reno_cong_avoid,
	.undo_cwnd	= tcp_reno_undo_cwnd,
};
net/ipv4/tcp_cong.c (v4.10.11). In this older version the default congestion control is tracked as the head of the global tcp_cong_list rather than as a per-network-namespace pointer.
/*
 * Pluggable TCP congestion control support and newReno
 * congestion control.
 * Based on ideas from I/O scheduler support and Web100.
 *
 * Copyright (C) 2005 Stephen Hemminger <shemminger@osdl.org>
 */

#define pr_fmt(fmt) "TCP: " fmt

#include <linux/module.h>
#include <linux/mm.h>
#include <linux/types.h>
#include <linux/list.h>
#include <linux/gfp.h>
#include <linux/jhash.h>
#include <net/tcp.h>

static DEFINE_SPINLOCK(tcp_cong_list_lock);
static LIST_HEAD(tcp_cong_list);

/* Simple linear search, don't expect many entries! */
static struct tcp_congestion_ops *tcp_ca_find(const char *name)
{
	struct tcp_congestion_ops *e;

	list_for_each_entry_rcu(e, &tcp_cong_list, list) {
		if (strcmp(e->name, name) == 0)
			return e;
	}

	return NULL;
}

/* Must be called with rcu lock held */
static const struct tcp_congestion_ops *__tcp_ca_find_autoload(const char *name)
{
	const struct tcp_congestion_ops *ca = tcp_ca_find(name);
#ifdef CONFIG_MODULES
	if (!ca && capable(CAP_NET_ADMIN)) {
		rcu_read_unlock();
		request_module("tcp_%s", name);
		rcu_read_lock();
		ca = tcp_ca_find(name);
	}
#endif
	return ca;
}

/* Simple linear search, not much in here. */
struct tcp_congestion_ops *tcp_ca_find_key(u32 key)
{
	struct tcp_congestion_ops *e;

	list_for_each_entry_rcu(e, &tcp_cong_list, list) {
		if (e->key == key)
			return e;
	}

	return NULL;
}

/*
 * Attach new congestion control algorithm to the list
 * of available options.
 */
int tcp_register_congestion_control(struct tcp_congestion_ops *ca)
{
	int ret = 0;

	/* all algorithms must implement these */
	if (!ca->ssthresh || !ca->undo_cwnd ||
	    !(ca->cong_avoid || ca->cong_control)) {
		pr_err("%s does not implement required ops\n", ca->name);
		return -EINVAL;
	}

	ca->key = jhash(ca->name, sizeof(ca->name), strlen(ca->name));

	spin_lock(&tcp_cong_list_lock);
	if (ca->key == TCP_CA_UNSPEC || tcp_ca_find_key(ca->key)) {
		pr_notice("%s already registered or non-unique key\n",
			  ca->name);
		ret = -EEXIST;
	} else {
		list_add_tail_rcu(&ca->list, &tcp_cong_list);
		pr_debug("%s registered\n", ca->name);
	}
	spin_unlock(&tcp_cong_list_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(tcp_register_congestion_control);

/*
 * Remove congestion control algorithm, called from
 * the module's remove function.  Module ref counts are used
 * to ensure that this can't be done till all sockets using
 * that method are closed.
 */
void tcp_unregister_congestion_control(struct tcp_congestion_ops *ca)
{
	spin_lock(&tcp_cong_list_lock);
	list_del_rcu(&ca->list);
	spin_unlock(&tcp_cong_list_lock);

	/* Wait for outstanding readers to complete before the
	 * module gets removed entirely.
	 *
	 * A try_module_get() should fail by now as our module is
	 * in "going" state since no refs are held anymore and
	 * module_exit() handler being called.
	 */
	synchronize_rcu();
}
EXPORT_SYMBOL_GPL(tcp_unregister_congestion_control);

u32 tcp_ca_get_key_by_name(const char *name, bool *ecn_ca)
{
	const struct tcp_congestion_ops *ca;
	u32 key = TCP_CA_UNSPEC;

	might_sleep();

	rcu_read_lock();
	ca = __tcp_ca_find_autoload(name);
	if (ca) {
		key = ca->key;
		*ecn_ca = ca->flags & TCP_CONG_NEEDS_ECN;
	}
	rcu_read_unlock();

	return key;
}
EXPORT_SYMBOL_GPL(tcp_ca_get_key_by_name);

char *tcp_ca_get_name_by_key(u32 key, char *buffer)
{
	const struct tcp_congestion_ops *ca;
	char *ret = NULL;

	rcu_read_lock();
	ca = tcp_ca_find_key(key);
	if (ca)
		ret = strncpy(buffer, ca->name,
			      TCP_CA_NAME_MAX);
	rcu_read_unlock();

	return ret;
}
EXPORT_SYMBOL_GPL(tcp_ca_get_name_by_key);

/* Assign choice of congestion control. */
void tcp_assign_congestion_control(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct tcp_congestion_ops *ca;

	rcu_read_lock();
	list_for_each_entry_rcu(ca, &tcp_cong_list, list) {
		if (likely(try_module_get(ca->owner))) {
			icsk->icsk_ca_ops = ca;
			goto out;
		}
		/* Fallback to next available. The last really
		 * guaranteed fallback is Reno from this list.
		 */
	}
out:
	rcu_read_unlock();

	/* Clear out private data before diag gets it and
	 * the ca has not been initialized.
	 */
	if (ca->get_info)
		memset(icsk->icsk_ca_priv, 0, sizeof(icsk->icsk_ca_priv));
	if (ca->flags & TCP_CONG_NEEDS_ECN)
		INET_ECN_xmit(sk);
	else
		INET_ECN_dontxmit(sk);
}

void tcp_init_congestion_control(struct sock *sk)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);

	if (icsk->icsk_ca_ops->init)
		icsk->icsk_ca_ops->init(sk);
	if (tcp_ca_needs_ecn(sk))
		INET_ECN_xmit(sk);
	else
		INET_ECN_dontxmit(sk);
}

static void tcp_reinit_congestion_control(struct sock *sk,
					  const struct tcp_congestion_ops *ca)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	tcp_cleanup_congestion_control(sk);
	icsk->icsk_ca_ops = ca;
	icsk->icsk_ca_setsockopt = 1;

	if (sk->sk_state != TCP_CLOSE) {
		memset(icsk->icsk_ca_priv, 0, sizeof(icsk->icsk_ca_priv));
		tcp_init_congestion_control(sk);
	}
}

/* Manage refcounts on socket close. */
void tcp_cleanup_congestion_control(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	if (icsk->icsk_ca_ops->release)
		icsk->icsk_ca_ops->release(sk);
	module_put(icsk->icsk_ca_ops->owner);
}

/* Used by sysctl to change default congestion control */
int tcp_set_default_congestion_control(const char *name)
{
	struct tcp_congestion_ops *ca;
	int ret = -ENOENT;

	spin_lock(&tcp_cong_list_lock);
	ca = tcp_ca_find(name);
#ifdef CONFIG_MODULES
	if (!ca && capable(CAP_NET_ADMIN)) {
		spin_unlock(&tcp_cong_list_lock);

		request_module("tcp_%s", name);
		spin_lock(&tcp_cong_list_lock);
		ca = tcp_ca_find(name);
	}
#endif

	if (ca) {
		ca->flags |= TCP_CONG_NON_RESTRICTED;	/* default is always allowed */
		list_move(&ca->list, &tcp_cong_list);
		ret = 0;
	}
	spin_unlock(&tcp_cong_list_lock);

	return ret;
}
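
/*
 * Editorial note: list_move() above puts the new default at the head of
 * tcp_cong_list.  In this version the head is also what
 * tcp_get_default_congestion_control() reports and where
 * tcp_assign_congestion_control() starts its walk, so the default is
 * simply "first on the list".
 */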

/* Set default value from kernel configuration at bootup */
static int __init tcp_congestion_default(void)
{
	return tcp_set_default_congestion_control(CONFIG_DEFAULT_TCP_CONG);
}
late_initcall(tcp_congestion_default);

/* Build string with list of available congestion control values */
void tcp_get_available_congestion_control(char *buf, size_t maxlen)
{
	struct tcp_congestion_ops *ca;
	size_t offs = 0;

	rcu_read_lock();
	list_for_each_entry_rcu(ca, &tcp_cong_list, list) {
		offs += snprintf(buf + offs, maxlen - offs,
				 "%s%s",
				 offs == 0 ? "" : " ", ca->name);
	}
	rcu_read_unlock();
}

/* Get current default congestion control */
void tcp_get_default_congestion_control(char *name)
{
	struct tcp_congestion_ops *ca;
	/* We will always have reno... */
	BUG_ON(list_empty(&tcp_cong_list));

	rcu_read_lock();
	ca = list_entry(tcp_cong_list.next, struct tcp_congestion_ops, list);
	strncpy(name, ca->name, TCP_CA_NAME_MAX);
	rcu_read_unlock();
}

/* Build list of non-restricted congestion control values */
void tcp_get_allowed_congestion_control(char *buf, size_t maxlen)
{
	struct tcp_congestion_ops *ca;
	size_t offs = 0;

	*buf = '\0';
	rcu_read_lock();
	list_for_each_entry_rcu(ca, &tcp_cong_list, list) {
		if (!(ca->flags & TCP_CONG_NON_RESTRICTED))
			continue;
		offs += snprintf(buf + offs, maxlen - offs,
				 "%s%s",
				 offs == 0 ? "" : " ", ca->name);
	}
	rcu_read_unlock();
}

/* Change list of non-restricted congestion control */
int tcp_set_allowed_congestion_control(char *val)
{
	struct tcp_congestion_ops *ca;
	char *saved_clone, *clone, *name;
	int ret = 0;

	saved_clone = clone = kstrdup(val, GFP_USER);
	if (!clone)
		return -ENOMEM;

	spin_lock(&tcp_cong_list_lock);
	/* pass 1 check for bad entries */
	while ((name = strsep(&clone, " ")) && *name) {
		ca = tcp_ca_find(name);
		if (!ca) {
			ret = -ENOENT;
			goto out;
		}
	}

	/* pass 2 clear old values */
	list_for_each_entry_rcu(ca, &tcp_cong_list, list)
		ca->flags &= ~TCP_CONG_NON_RESTRICTED;

	/* pass 3 mark as allowed */
	while ((name = strsep(&val, " ")) && *name) {
		ca = tcp_ca_find(name);
		WARN_ON(!ca);
		if (ca)
			ca->flags |= TCP_CONG_NON_RESTRICTED;
	}
out:
	spin_unlock(&tcp_cong_list_lock);
	kfree(saved_clone);

	return ret;
}

/* Change congestion control for socket */
int tcp_set_congestion_control(struct sock *sk, const char *name)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	const struct tcp_congestion_ops *ca;
	int err = 0;

	if (icsk->icsk_ca_dst_locked)
		return -EPERM;

	rcu_read_lock();
	ca = __tcp_ca_find_autoload(name);
	/* No change asking for existing value */
	if (ca == icsk->icsk_ca_ops) {
		icsk->icsk_ca_setsockopt = 1;
		goto out;
	}
	if (!ca)
		err = -ENOENT;
	else if (!((ca->flags & TCP_CONG_NON_RESTRICTED) ||
		   ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN)))
		err = -EPERM;
	else if (!try_module_get(ca->owner))
		err = -EBUSY;
	else
		tcp_reinit_congestion_control(sk, ca);
 out:
	rcu_read_unlock();
	return err;
}

/* Slow start is used when congestion window is no greater than the slow start
 * threshold. We base on RFC2581 and also handle stretch ACKs properly.
 * We do not implement RFC3465 Appropriate Byte Counting (ABC) per se but
 * something better;) a packet is only considered (s)acked in its entirety to
 * defend the ACK attacks described in the RFC. Slow start processes a stretch
 * ACK of degree N as if N acks of degree 1 are received back to back except
 * ABC caps N to 2. Slow start exits when cwnd grows over ssthresh and
 * returns the leftover acks to adjust cwnd in congestion avoidance mode.
 */
u32 tcp_slow_start(struct tcp_sock *tp, u32 acked)
{
	u32 cwnd = min(tp->snd_cwnd + acked, tp->snd_ssthresh);

	acked -= cwnd - tp->snd_cwnd;
	tp->snd_cwnd = min(cwnd, tp->snd_cwnd_clamp);

	return acked;
}
EXPORT_SYMBOL_GPL(tcp_slow_start);

/* In theory this is tp->snd_cwnd += 1 / tp->snd_cwnd (or alternative w),
 * for every packet that was ACKed.
 */
void tcp_cong_avoid_ai(struct tcp_sock *tp, u32 w, u32 acked)
{
	/* If credits accumulated at a higher w, apply them gently now. */
	if (tp->snd_cwnd_cnt >= w) {
		tp->snd_cwnd_cnt = 0;
		tp->snd_cwnd++;
	}

	tp->snd_cwnd_cnt += acked;
	if (tp->snd_cwnd_cnt >= w) {
		u32 delta = tp->snd_cwnd_cnt / w;

		tp->snd_cwnd_cnt -= delta * w;
		tp->snd_cwnd += delta;
	}
	tp->snd_cwnd = min(tp->snd_cwnd, tp->snd_cwnd_clamp);
}
EXPORT_SYMBOL_GPL(tcp_cong_avoid_ai);

/*
 * TCP Reno congestion control
 * This is special case used for fallback as well.
 */
/* This is Jacobson's slow start and congestion avoidance.
 * SIGCOMM '88, p. 328.
 */
void tcp_reno_cong_avoid(struct sock *sk, u32 ack, u32 acked)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if (!tcp_is_cwnd_limited(sk))
		return;

	/* In "safe" area, increase. */
	if (tcp_in_slow_start(tp)) {
		acked = tcp_slow_start(tp, acked);
		if (!acked)
			return;
	}
	/* In dangerous area, increase slowly. */
	tcp_cong_avoid_ai(tp, tp->snd_cwnd, acked);
}
EXPORT_SYMBOL_GPL(tcp_reno_cong_avoid);

/* Slow start threshold is half the congestion window (min 2) */
u32 tcp_reno_ssthresh(struct sock *sk)
{
	const struct tcp_sock *tp = tcp_sk(sk);

	return max(tp->snd_cwnd >> 1U, 2U);
}
EXPORT_SYMBOL_GPL(tcp_reno_ssthresh);

u32 tcp_reno_undo_cwnd(struct sock *sk)
{
	const struct tcp_sock *tp = tcp_sk(sk);

	return max(tp->snd_cwnd, tp->snd_ssthresh << 1);
}
EXPORT_SYMBOL_GPL(tcp_reno_undo_cwnd);

struct tcp_congestion_ops tcp_reno = {
	.flags		= TCP_CONG_NON_RESTRICTED,
	.name		= "reno",
	.owner		= THIS_MODULE,
	.ssthresh	= tcp_reno_ssthresh,
	.cong_avoid	= tcp_reno_cong_avoid,
	.undo_cwnd	= tcp_reno_undo_cwnd,
};