v5.9
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Pluggable TCP congestion control support and newReno
 * congestion control.
 * Based on ideas from I/O scheduler support and Web100.
 *
 * Copyright (C) 2005 Stephen Hemminger <shemminger@osdl.org>
 */

#define pr_fmt(fmt) "TCP: " fmt

#include <linux/module.h>
#include <linux/mm.h>
#include <linux/types.h>
#include <linux/list.h>
#include <linux/gfp.h>
#include <linux/jhash.h>
#include <net/tcp.h>

static DEFINE_SPINLOCK(tcp_cong_list_lock);
static LIST_HEAD(tcp_cong_list);

/* Simple linear search, don't expect many entries! */
struct tcp_congestion_ops *tcp_ca_find(const char *name)
{
	struct tcp_congestion_ops *e;

	list_for_each_entry_rcu(e, &tcp_cong_list, list) {
		if (strcmp(e->name, name) == 0)
			return e;
	}

	return NULL;
}

/* Must be called with rcu lock held */
static struct tcp_congestion_ops *tcp_ca_find_autoload(struct net *net,
						       const char *name)
{
	struct tcp_congestion_ops *ca = tcp_ca_find(name);

#ifdef CONFIG_MODULES
	if (!ca && capable(CAP_NET_ADMIN)) {
		rcu_read_unlock();
		request_module("tcp_%s", name);
		rcu_read_lock();
		ca = tcp_ca_find(name);
	}
#endif
	return ca;
}

/* Simple linear search, not much in here. */
struct tcp_congestion_ops *tcp_ca_find_key(u32 key)
{
	struct tcp_congestion_ops *e;

	list_for_each_entry_rcu(e, &tcp_cong_list, list) {
		if (e->key == key)
			return e;
	}

	return NULL;
}

/*
 * Attach new congestion control algorithm to the list
 * of available options.
 */
int tcp_register_congestion_control(struct tcp_congestion_ops *ca)
{
	int ret = 0;

	/* all algorithms must implement these */
	if (!ca->ssthresh || !ca->undo_cwnd ||
	    !(ca->cong_avoid || ca->cong_control)) {
		pr_err("%s does not implement required ops\n", ca->name);
		return -EINVAL;
	}

	ca->key = jhash(ca->name, sizeof(ca->name), strlen(ca->name));

	spin_lock(&tcp_cong_list_lock);
	if (ca->key == TCP_CA_UNSPEC || tcp_ca_find_key(ca->key)) {
		pr_notice("%s already registered or non-unique key\n",
			  ca->name);
		ret = -EEXIST;
	} else {
		list_add_tail_rcu(&ca->list, &tcp_cong_list);
		pr_debug("%s registered\n", ca->name);
	}
	spin_unlock(&tcp_cong_list_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(tcp_register_congestion_control);
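
For context, here is a minimal sketch of how an out-of-tree module might use this registration API. Everything named tcp_toy_* is hypothetical; the ops it fills in mirror the required-ops check in tcp_register_congestion_control() above (ssthresh, undo_cwnd, and one of cong_avoid/cong_control in the v5.9 version).

/* Hypothetical toy module, Reno-like, for illustration only. */
#include <linux/module.h>
#include <net/tcp.h>

static u32 tcp_toy_ssthresh(struct sock *sk)
{
	/* Halve the window on loss, minimum of 2 (same policy as Reno). */
	return max(tcp_sk(sk)->snd_cwnd >> 1U, 2U);
}

static u32 tcp_toy_undo_cwnd(struct sock *sk)
{
	/* Restore the pre-loss window when a loss turns out to be spurious. */
	return max(tcp_sk(sk)->snd_cwnd, tcp_sk(sk)->prior_cwnd);
}

static void tcp_toy_cong_avoid(struct sock *sk, u32 ack, u32 acked)
{
	/* Defer to the exported Reno helper defined later in this file. */
	tcp_reno_cong_avoid(sk, ack, acked);
}

static struct tcp_congestion_ops tcp_toy __read_mostly = {
	.name		= "toy",		/* must fit in TCP_CA_NAME_MAX */
	.owner		= THIS_MODULE,
	.ssthresh	= tcp_toy_ssthresh,
	.undo_cwnd	= tcp_toy_undo_cwnd,
	.cong_avoid	= tcp_toy_cong_avoid,
};

static int __init tcp_toy_register(void)
{
	return tcp_register_congestion_control(&tcp_toy);
}

static void __exit tcp_toy_unregister(void)
{
	tcp_unregister_congestion_control(&tcp_toy);
}

module_init(tcp_toy_register);
module_exit(tcp_toy_unregister);
MODULE_LICENSE("GPL");

Once loaded, such a module makes "toy" selectable through the sysctls and the TCP_CONGESTION socket option handled later in this file.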

/*
 * Remove congestion control algorithm, called from
 * the module's remove function.  Module ref counts are used
 * to ensure that this can't be done till all sockets using
 * that method are closed.
 */
void tcp_unregister_congestion_control(struct tcp_congestion_ops *ca)
{
	spin_lock(&tcp_cong_list_lock);
	list_del_rcu(&ca->list);
	spin_unlock(&tcp_cong_list_lock);

	/* Wait for outstanding readers to complete before the
	 * module gets removed entirely.
	 *
	 * A try_module_get() should fail by now as our module is
	 * in "going" state since no refs are held anymore and
	 * the module_exit() handler is being called.
	 */
	synchronize_rcu();
}
EXPORT_SYMBOL_GPL(tcp_unregister_congestion_control);

u32 tcp_ca_get_key_by_name(struct net *net, const char *name, bool *ecn_ca)
{
	const struct tcp_congestion_ops *ca;
	u32 key = TCP_CA_UNSPEC;

	might_sleep();

	rcu_read_lock();
	ca = tcp_ca_find_autoload(net, name);
	if (ca) {
		key = ca->key;
		*ecn_ca = ca->flags & TCP_CONG_NEEDS_ECN;
	}
	rcu_read_unlock();

	return key;
}
EXPORT_SYMBOL_GPL(tcp_ca_get_key_by_name);

char *tcp_ca_get_name_by_key(u32 key, char *buffer)
{
	const struct tcp_congestion_ops *ca;
	char *ret = NULL;

	rcu_read_lock();
	ca = tcp_ca_find_key(key);
	if (ca)
		ret = strncpy(buffer, ca->name,
			      TCP_CA_NAME_MAX);
	rcu_read_unlock();

	return ret;
}
EXPORT_SYMBOL_GPL(tcp_ca_get_name_by_key);

/* Assign choice of congestion control. */
void tcp_assign_congestion_control(struct sock *sk)
{
	struct net *net = sock_net(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);
	const struct tcp_congestion_ops *ca;

	rcu_read_lock();
	ca = rcu_dereference(net->ipv4.tcp_congestion_control);
	if (unlikely(!bpf_try_module_get(ca, ca->owner)))
		ca = &tcp_reno;
	icsk->icsk_ca_ops = ca;
	rcu_read_unlock();

	memset(icsk->icsk_ca_priv, 0, sizeof(icsk->icsk_ca_priv));
	if (ca->flags & TCP_CONG_NEEDS_ECN)
		INET_ECN_xmit(sk);
	else
		INET_ECN_dontxmit(sk);
}

void tcp_init_congestion_control(struct sock *sk)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);

	tcp_sk(sk)->prior_ssthresh = 0;
	if (icsk->icsk_ca_ops->init)
		icsk->icsk_ca_ops->init(sk);
	if (tcp_ca_needs_ecn(sk))
		INET_ECN_xmit(sk);
	else
		INET_ECN_dontxmit(sk);
}

static void tcp_reinit_congestion_control(struct sock *sk,
					  const struct tcp_congestion_ops *ca)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	tcp_cleanup_congestion_control(sk);
	icsk->icsk_ca_ops = ca;
	icsk->icsk_ca_setsockopt = 1;
	memset(icsk->icsk_ca_priv, 0, sizeof(icsk->icsk_ca_priv));

	if (!((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN)))
		tcp_init_congestion_control(sk);
}

/* Manage refcounts on socket close. */
void tcp_cleanup_congestion_control(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	if (icsk->icsk_ca_ops->release)
		icsk->icsk_ca_ops->release(sk);
	bpf_module_put(icsk->icsk_ca_ops, icsk->icsk_ca_ops->owner);
}

/* Used by sysctl to change default congestion control */
int tcp_set_default_congestion_control(struct net *net, const char *name)
{
	struct tcp_congestion_ops *ca;
	const struct tcp_congestion_ops *prev;
	int ret;

	rcu_read_lock();
	ca = tcp_ca_find_autoload(net, name);
	if (!ca) {
		ret = -ENOENT;
	} else if (!bpf_try_module_get(ca, ca->owner)) {
		ret = -EBUSY;
	} else {
		prev = xchg(&net->ipv4.tcp_congestion_control, ca);
		if (prev)
			bpf_module_put(prev, prev->owner);

		ca->flags |= TCP_CONG_NON_RESTRICTED;
		ret = 0;
	}
	rcu_read_unlock();

	return ret;
}
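
The "sysctl" mentioned in the comment above is net.ipv4.tcp_congestion_control. As a usage sketch (assuming a typical procfs setup and sufficient privileges), a process can switch its network namespace's default like this:

/* Userspace sketch: writing this sysctl ends up in
 * tcp_set_default_congestion_control() for the writer's netns.
 */
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/proc/sys/net/ipv4/tcp_congestion_control", "w");

	if (!f || fputs("reno", f) < 0)
		perror("tcp_congestion_control");	/* typically needs root */
	if (f)
		fclose(f);
	return 0;
}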

/* Set default value from kernel configuration at bootup */
static int __init tcp_congestion_default(void)
{
	return tcp_set_default_congestion_control(&init_net,
						  CONFIG_DEFAULT_TCP_CONG);
}
late_initcall(tcp_congestion_default);

/* Build string with list of available congestion control values */
void tcp_get_available_congestion_control(char *buf, size_t maxlen)
{
	struct tcp_congestion_ops *ca;
	size_t offs = 0;

	rcu_read_lock();
	list_for_each_entry_rcu(ca, &tcp_cong_list, list) {
		offs += snprintf(buf + offs, maxlen - offs,
				 "%s%s",
				 offs == 0 ? "" : " ", ca->name);

		if (WARN_ON_ONCE(offs >= maxlen))
			break;
	}
	rcu_read_unlock();
}

/* Get current default congestion control */
void tcp_get_default_congestion_control(struct net *net, char *name)
{
	const struct tcp_congestion_ops *ca;

	rcu_read_lock();
	ca = rcu_dereference(net->ipv4.tcp_congestion_control);
	strncpy(name, ca->name, TCP_CA_NAME_MAX);
	rcu_read_unlock();
}

/* Build list of non-restricted congestion control values */
void tcp_get_allowed_congestion_control(char *buf, size_t maxlen)
{
	struct tcp_congestion_ops *ca;
	size_t offs = 0;

	*buf = '\0';
	rcu_read_lock();
	list_for_each_entry_rcu(ca, &tcp_cong_list, list) {
		if (!(ca->flags & TCP_CONG_NON_RESTRICTED))
			continue;
		offs += snprintf(buf + offs, maxlen - offs,
				 "%s%s",
				 offs == 0 ? "" : " ", ca->name);

		if (WARN_ON_ONCE(offs >= maxlen))
			break;
	}
	rcu_read_unlock();
}

/* Change list of non-restricted congestion controls */
int tcp_set_allowed_congestion_control(char *val)
{
	struct tcp_congestion_ops *ca;
	char *saved_clone, *clone, *name;
	int ret = 0;

	saved_clone = clone = kstrdup(val, GFP_USER);
	if (!clone)
		return -ENOMEM;

	spin_lock(&tcp_cong_list_lock);
	/* pass 1: check for bad entries */
	while ((name = strsep(&clone, " ")) && *name) {
		ca = tcp_ca_find(name);
		if (!ca) {
			ret = -ENOENT;
			goto out;
		}
	}

	/* pass 2: clear old values */
	list_for_each_entry_rcu(ca, &tcp_cong_list, list)
		ca->flags &= ~TCP_CONG_NON_RESTRICTED;

	/* pass 3: mark as allowed */
	while ((name = strsep(&val, " ")) && *name) {
		ca = tcp_ca_find(name);
		WARN_ON(!ca);
		if (ca)
			ca->flags |= TCP_CONG_NON_RESTRICTED;
	}
out:
	spin_unlock(&tcp_cong_list_lock);
	kfree(saved_clone);

	return ret;
}

/* Change congestion control for socket. If load is false, then it is the
 * responsibility of the caller to call tcp_init_congestion_control or
 * tcp_reinit_congestion_control (if the current congestion control was
 * already initialized).
 */
int tcp_set_congestion_control(struct sock *sk, const char *name, bool load,
			       bool reinit, bool cap_net_admin)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	const struct tcp_congestion_ops *ca;
	int err = 0;

	if (icsk->icsk_ca_dst_locked)
		return -EPERM;

	rcu_read_lock();
	if (!load)
		ca = tcp_ca_find(name);
	else
		ca = tcp_ca_find_autoload(sock_net(sk), name);

	/* No change asking for existing value */
	if (ca == icsk->icsk_ca_ops) {
		icsk->icsk_ca_setsockopt = 1;
		goto out;
	}

	if (!ca) {
		err = -ENOENT;
	} else if (!load) {
		const struct tcp_congestion_ops *old_ca = icsk->icsk_ca_ops;

		if (bpf_try_module_get(ca, ca->owner)) {
			if (reinit) {
				tcp_reinit_congestion_control(sk, ca);
			} else {
				icsk->icsk_ca_ops = ca;
				bpf_module_put(old_ca, old_ca->owner);
			}
		} else {
			err = -EBUSY;
		}
	} else if (!((ca->flags & TCP_CONG_NON_RESTRICTED) || cap_net_admin)) {
		err = -EPERM;
	} else if (!bpf_try_module_get(ca, ca->owner)) {
		err = -EBUSY;
	} else {
		tcp_reinit_congestion_control(sk, ca);
	}
 out:
	rcu_read_unlock();
	return err;
}
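
The per-socket path above is reached from user space through the TCP_CONGESTION socket option. A minimal caller might look like this (error handling abbreviated; "cubic" is just an example name):

/* Userspace sketch: select a congestion control for one socket via
 * setsockopt(TCP_CONGESTION), which ends up in tcp_set_congestion_control().
 */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/tcp.h>

int main(void)
{
	int fd = socket(AF_INET, SOCK_STREAM, 0);
	char name[16] = "cubic";		/* TCP_CA_NAME_MAX is 16 */
	socklen_t len = sizeof(name);

	if (setsockopt(fd, IPPROTO_TCP, TCP_CONGESTION, name, strlen(name)) < 0)
		perror("setsockopt TCP_CONGESTION");	/* e.g. ENOENT, EPERM */

	if (getsockopt(fd, IPPROTO_TCP, TCP_CONGESTION, name, &len) == 0)
		printf("congestion control: %s\n", name);

	close(fd);
	return 0;
}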

/* Slow start is used when congestion window is no greater than the slow start
 * threshold. It is based on RFC2581 and also handles stretch ACKs properly.
 * We do not implement RFC3465 Appropriate Byte Counting (ABC) per se but
 * something better;) a packet is only considered (s)acked in its entirety to
 * defend the ACK attacks described in the RFC. Slow start processes a stretch
 * ACK of degree N as if N acks of degree 1 are received back to back except
 * ABC caps N to 2. Slow start exits when cwnd grows over ssthresh and
 * returns the leftover acks to adjust cwnd in congestion avoidance mode.
 */
u32 tcp_slow_start(struct tcp_sock *tp, u32 acked)
{
	u32 cwnd = min(tp->snd_cwnd + acked, tp->snd_ssthresh);

	acked -= cwnd - tp->snd_cwnd;
	tp->snd_cwnd = min(cwnd, tp->snd_cwnd_clamp);

	return acked;
}
EXPORT_SYMBOL_GPL(tcp_slow_start);
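
To make the stretch-ACK arithmetic concrete, here is a small userspace mock of the function above (the tcp_sock fields are reduced to the three that matter; the numbers are hypothetical):

/* Worked example of tcp_slow_start()'s arithmetic. With snd_cwnd = 8,
 * snd_ssthresh = 10 and a stretch ACK covering 5 segments, cwnd is capped
 * at ssthresh (10) and the 3 leftover ACKs are returned so the caller can
 * apply them in congestion avoidance mode.
 */
#include <stdio.h>

typedef unsigned int u32;

struct mock_tp { u32 snd_cwnd, snd_ssthresh, snd_cwnd_clamp; };

static u32 min_u32(u32 a, u32 b) { return a < b ? a : b; }

static u32 slow_start(struct mock_tp *tp, u32 acked)
{
	u32 cwnd = min_u32(tp->snd_cwnd + acked, tp->snd_ssthresh);

	acked -= cwnd - tp->snd_cwnd;		/* leftover beyond ssthresh */
	tp->snd_cwnd = min_u32(cwnd, tp->snd_cwnd_clamp);
	return acked;
}

int main(void)
{
	struct mock_tp tp = { .snd_cwnd = 8, .snd_ssthresh = 10,
			      .snd_cwnd_clamp = 0xffffffffU };
	u32 left = slow_start(&tp, 5);

	printf("cwnd=%u leftover=%u\n", tp.snd_cwnd, left);	/* cwnd=10 leftover=3 */
	return 0;
}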

/* In theory this is tp->snd_cwnd += 1 / tp->snd_cwnd (or alternative w),
 * for every packet that was ACKed.
 */
void tcp_cong_avoid_ai(struct tcp_sock *tp, u32 w, u32 acked)
{
	/* If credits accumulated at a higher w, apply them gently now. */
	if (tp->snd_cwnd_cnt >= w) {
		tp->snd_cwnd_cnt = 0;
		tp->snd_cwnd++;
	}

	tp->snd_cwnd_cnt += acked;
	if (tp->snd_cwnd_cnt >= w) {
		u32 delta = tp->snd_cwnd_cnt / w;

		tp->snd_cwnd_cnt -= delta * w;
		tp->snd_cwnd += delta;
	}
	tp->snd_cwnd = min(tp->snd_cwnd, tp->snd_cwnd_clamp);
}
EXPORT_SYMBOL_GPL(tcp_cong_avoid_ai);
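
As a concrete walk-through of the accounting above (with hypothetical numbers): take w = snd_cwnd = 10 and snd_cwnd_cnt = 8, and let an ACK cover 5 segments. The counter becomes 13, so delta = 13 / 10 = 1: snd_cwnd grows to 11 and snd_cwnd_cnt keeps the remainder 3. On average, one full window's worth of ACKed segments is consumed per cwnd increment, which is exactly the additive increase of roughly one segment per round trip that Reno obtains by passing w = snd_cwnd below.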

/*
 * TCP Reno congestion control
 * This is a special case used for fallback as well.
 */
/* This is Jacobson's slow start and congestion avoidance.
 * SIGCOMM '88, p. 328.
 */
void tcp_reno_cong_avoid(struct sock *sk, u32 ack, u32 acked)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if (!tcp_is_cwnd_limited(sk))
		return;

	/* In "safe" area, increase. */
	if (tcp_in_slow_start(tp)) {
		acked = tcp_slow_start(tp, acked);
		if (!acked)
			return;
	}
	/* In dangerous area, increase slowly. */
	tcp_cong_avoid_ai(tp, tp->snd_cwnd, acked);
}
EXPORT_SYMBOL_GPL(tcp_reno_cong_avoid);

/* Slow start threshold is half the congestion window (min 2) */
u32 tcp_reno_ssthresh(struct sock *sk)
{
	const struct tcp_sock *tp = tcp_sk(sk);

	return max(tp->snd_cwnd >> 1U, 2U);
}
EXPORT_SYMBOL_GPL(tcp_reno_ssthresh);

u32 tcp_reno_undo_cwnd(struct sock *sk)
{
	const struct tcp_sock *tp = tcp_sk(sk);

	return max(tp->snd_cwnd, tp->prior_cwnd);
}
EXPORT_SYMBOL_GPL(tcp_reno_undo_cwnd);

struct tcp_congestion_ops tcp_reno = {
	.flags		= TCP_CONG_NON_RESTRICTED,
	.name		= "reno",
	.owner		= THIS_MODULE,
	.ssthresh	= tcp_reno_ssthresh,
	.cong_avoid	= tcp_reno_cong_avoid,
	.undo_cwnd	= tcp_reno_undo_cwnd,
};
v4.6
 
/*
 * Pluggable TCP congestion control support and newReno
 * congestion control.
 * Based on ideas from I/O scheduler support and Web100.
 *
 * Copyright (C) 2005 Stephen Hemminger <shemminger@osdl.org>
 */

#define pr_fmt(fmt) "TCP: " fmt

#include <linux/module.h>
#include <linux/mm.h>
#include <linux/types.h>
#include <linux/list.h>
#include <linux/gfp.h>
#include <linux/jhash.h>
#include <net/tcp.h>

static DEFINE_SPINLOCK(tcp_cong_list_lock);
static LIST_HEAD(tcp_cong_list);

/* Simple linear search, don't expect many entries! */
static struct tcp_congestion_ops *tcp_ca_find(const char *name)
{
	struct tcp_congestion_ops *e;

	list_for_each_entry_rcu(e, &tcp_cong_list, list) {
		if (strcmp(e->name, name) == 0)
			return e;
	}

	return NULL;
}

/* Must be called with rcu lock held */
static const struct tcp_congestion_ops *__tcp_ca_find_autoload(const char *name)
{
	const struct tcp_congestion_ops *ca = tcp_ca_find(name);
#ifdef CONFIG_MODULES
	if (!ca && capable(CAP_NET_ADMIN)) {
		rcu_read_unlock();
		request_module("tcp_%s", name);
		rcu_read_lock();
		ca = tcp_ca_find(name);
	}
#endif
	return ca;
}

/* Simple linear search, not much in here. */
struct tcp_congestion_ops *tcp_ca_find_key(u32 key)
{
	struct tcp_congestion_ops *e;

	list_for_each_entry_rcu(e, &tcp_cong_list, list) {
		if (e->key == key)
			return e;
	}

	return NULL;
}

/*
 * Attach new congestion control algorithm to the list
 * of available options.
 */
int tcp_register_congestion_control(struct tcp_congestion_ops *ca)
{
	int ret = 0;

	/* all algorithms must implement ssthresh and cong_avoid ops */
	if (!ca->ssthresh || !ca->cong_avoid) {
		pr_err("%s does not implement required ops\n", ca->name);
		return -EINVAL;
	}

	ca->key = jhash(ca->name, sizeof(ca->name), strlen(ca->name));

	spin_lock(&tcp_cong_list_lock);
	if (ca->key == TCP_CA_UNSPEC || tcp_ca_find_key(ca->key)) {
		pr_notice("%s already registered or non-unique key\n",
			  ca->name);
		ret = -EEXIST;
	} else {
		list_add_tail_rcu(&ca->list, &tcp_cong_list);
		pr_debug("%s registered\n", ca->name);
	}
	spin_unlock(&tcp_cong_list_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(tcp_register_congestion_control);

/*
 * Remove congestion control algorithm, called from
 * the module's remove function.  Module ref counts are used
 * to ensure that this can't be done till all sockets using
 * that method are closed.
 */
void tcp_unregister_congestion_control(struct tcp_congestion_ops *ca)
{
	spin_lock(&tcp_cong_list_lock);
	list_del_rcu(&ca->list);
	spin_unlock(&tcp_cong_list_lock);

	/* Wait for outstanding readers to complete before the
	 * module gets removed entirely.
	 *
	 * A try_module_get() should fail by now as our module is
	 * in "going" state since no refs are held anymore and
	 * the module_exit() handler is being called.
	 */
	synchronize_rcu();
}
EXPORT_SYMBOL_GPL(tcp_unregister_congestion_control);

u32 tcp_ca_get_key_by_name(const char *name, bool *ecn_ca)
{
	const struct tcp_congestion_ops *ca;
	u32 key = TCP_CA_UNSPEC;

	might_sleep();

	rcu_read_lock();
	ca = __tcp_ca_find_autoload(name);
	if (ca) {
		key = ca->key;
		*ecn_ca = ca->flags & TCP_CONG_NEEDS_ECN;
	}
	rcu_read_unlock();

	return key;
}
EXPORT_SYMBOL_GPL(tcp_ca_get_key_by_name);

char *tcp_ca_get_name_by_key(u32 key, char *buffer)
{
	const struct tcp_congestion_ops *ca;
	char *ret = NULL;

	rcu_read_lock();
	ca = tcp_ca_find_key(key);
	if (ca)
		ret = strncpy(buffer, ca->name,
			      TCP_CA_NAME_MAX);
	rcu_read_unlock();

	return ret;
}
EXPORT_SYMBOL_GPL(tcp_ca_get_name_by_key);

/* Assign choice of congestion control. */
void tcp_assign_congestion_control(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct tcp_congestion_ops *ca;

	rcu_read_lock();
	list_for_each_entry_rcu(ca, &tcp_cong_list, list) {
		if (likely(try_module_get(ca->owner))) {
			icsk->icsk_ca_ops = ca;
			goto out;
		}
		/* Fallback to next available. The last really
		 * guaranteed fallback is Reno from this list.
		 */
	}
out:
	rcu_read_unlock();

	/* Clear out private data before diag gets it and
	 * the ca has not been initialized.
	 */
	if (ca->get_info)
		memset(icsk->icsk_ca_priv, 0, sizeof(icsk->icsk_ca_priv));
	if (ca->flags & TCP_CONG_NEEDS_ECN)
		INET_ECN_xmit(sk);
	else
		INET_ECN_dontxmit(sk);
}
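
Note how default selection differs between the two versions shown: in v4.6 the default algorithm is simply the head of tcp_cong_list (tcp_set_default_congestion_control() below moves the chosen entry to the front with list_move(), tcp_get_default_congestion_control() reads tcp_cong_list.next, and tcp_assign_congestion_control() above walks the list until try_module_get() succeeds, with the built-in Reno as the guaranteed last resort). In v5.9, by contrast, each network namespace holds an RCU-protected net->ipv4.tcp_congestion_control pointer and falls back to tcp_reno directly.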

void tcp_init_congestion_control(struct sock *sk)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);

	if (icsk->icsk_ca_ops->init)
		icsk->icsk_ca_ops->init(sk);
	if (tcp_ca_needs_ecn(sk))
		INET_ECN_xmit(sk);
	else
		INET_ECN_dontxmit(sk);
}

static void tcp_reinit_congestion_control(struct sock *sk,
					  const struct tcp_congestion_ops *ca)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	tcp_cleanup_congestion_control(sk);
	icsk->icsk_ca_ops = ca;
	icsk->icsk_ca_setsockopt = 1;

	if (sk->sk_state != TCP_CLOSE)
		tcp_init_congestion_control(sk);
}

/* Manage refcounts on socket close. */
void tcp_cleanup_congestion_control(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	if (icsk->icsk_ca_ops->release)
		icsk->icsk_ca_ops->release(sk);
	module_put(icsk->icsk_ca_ops->owner);
}

/* Used by sysctl to change default congestion control */
int tcp_set_default_congestion_control(const char *name)
{
	struct tcp_congestion_ops *ca;
	int ret = -ENOENT;

	spin_lock(&tcp_cong_list_lock);
	ca = tcp_ca_find(name);
#ifdef CONFIG_MODULES
	if (!ca && capable(CAP_NET_ADMIN)) {
		spin_unlock(&tcp_cong_list_lock);

		request_module("tcp_%s", name);
		spin_lock(&tcp_cong_list_lock);
		ca = tcp_ca_find(name);
	}
#endif

	if (ca) {
		ca->flags |= TCP_CONG_NON_RESTRICTED;	/* default is always allowed */
		list_move(&ca->list, &tcp_cong_list);
		ret = 0;
	}
	spin_unlock(&tcp_cong_list_lock);

	return ret;
}

/* Set default value from kernel configuration at bootup */
static int __init tcp_congestion_default(void)
{
	return tcp_set_default_congestion_control(CONFIG_DEFAULT_TCP_CONG);
}
late_initcall(tcp_congestion_default);

/* Build string with list of available congestion control values */
void tcp_get_available_congestion_control(char *buf, size_t maxlen)
{
	struct tcp_congestion_ops *ca;
	size_t offs = 0;

	rcu_read_lock();
	list_for_each_entry_rcu(ca, &tcp_cong_list, list) {
		offs += snprintf(buf + offs, maxlen - offs,
				 "%s%s",
				 offs == 0 ? "" : " ", ca->name);
	}
	rcu_read_unlock();
}

/* Get current default congestion control */
void tcp_get_default_congestion_control(char *name)
{
	struct tcp_congestion_ops *ca;
	/* We will always have reno... */
	BUG_ON(list_empty(&tcp_cong_list));

	rcu_read_lock();
	ca = list_entry(tcp_cong_list.next, struct tcp_congestion_ops, list);
	strncpy(name, ca->name, TCP_CA_NAME_MAX);
	rcu_read_unlock();
}

/* Build list of non-restricted congestion control values */
void tcp_get_allowed_congestion_control(char *buf, size_t maxlen)
{
	struct tcp_congestion_ops *ca;
	size_t offs = 0;

	*buf = '\0';
	rcu_read_lock();
	list_for_each_entry_rcu(ca, &tcp_cong_list, list) {
		if (!(ca->flags & TCP_CONG_NON_RESTRICTED))
			continue;
		offs += snprintf(buf + offs, maxlen - offs,
				 "%s%s",
				 offs == 0 ? "" : " ", ca->name);
	}
	rcu_read_unlock();
}

/* Change list of non-restricted congestion controls */
int tcp_set_allowed_congestion_control(char *val)
{
	struct tcp_congestion_ops *ca;
	char *saved_clone, *clone, *name;
	int ret = 0;

	saved_clone = clone = kstrdup(val, GFP_USER);
	if (!clone)
		return -ENOMEM;

	spin_lock(&tcp_cong_list_lock);
	/* pass 1: check for bad entries */
	while ((name = strsep(&clone, " ")) && *name) {
		ca = tcp_ca_find(name);
		if (!ca) {
			ret = -ENOENT;
			goto out;
		}
	}

	/* pass 2: clear old values */
	list_for_each_entry_rcu(ca, &tcp_cong_list, list)
		ca->flags &= ~TCP_CONG_NON_RESTRICTED;

	/* pass 3: mark as allowed */
	while ((name = strsep(&val, " ")) && *name) {
		ca = tcp_ca_find(name);
		WARN_ON(!ca);
		if (ca)
			ca->flags |= TCP_CONG_NON_RESTRICTED;
	}
out:
	spin_unlock(&tcp_cong_list_lock);
	kfree(saved_clone);

	return ret;
}

/* Change congestion control for socket */
int tcp_set_congestion_control(struct sock *sk, const char *name)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	const struct tcp_congestion_ops *ca;
	int err = 0;

	if (icsk->icsk_ca_dst_locked)
		return -EPERM;

	rcu_read_lock();
	ca = __tcp_ca_find_autoload(name);
	/* No change asking for existing value */
	if (ca == icsk->icsk_ca_ops) {
		icsk->icsk_ca_setsockopt = 1;
		goto out;
	}
	if (!ca)
		err = -ENOENT;
	else if (!((ca->flags & TCP_CONG_NON_RESTRICTED) ||
		   ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN)))
		err = -EPERM;
	else if (!try_module_get(ca->owner))
		err = -EBUSY;
	else
		tcp_reinit_congestion_control(sk, ca);
 out:
	rcu_read_unlock();
	return err;
}

/* Slow start is used when congestion window is no greater than the slow start
 * threshold. It is based on RFC2581 and also handles stretch ACKs properly.
 * We do not implement RFC3465 Appropriate Byte Counting (ABC) per se but
 * something better;) a packet is only considered (s)acked in its entirety to
 * defend the ACK attacks described in the RFC. Slow start processes a stretch
 * ACK of degree N as if N acks of degree 1 are received back to back except
 * ABC caps N to 2. Slow start exits when cwnd grows over ssthresh and
 * returns the leftover acks to adjust cwnd in congestion avoidance mode.
 */
u32 tcp_slow_start(struct tcp_sock *tp, u32 acked)
{
	u32 cwnd = min(tp->snd_cwnd + acked, tp->snd_ssthresh);

	acked -= cwnd - tp->snd_cwnd;
	tp->snd_cwnd = min(cwnd, tp->snd_cwnd_clamp);

	return acked;
}
EXPORT_SYMBOL_GPL(tcp_slow_start);

/* In theory this is tp->snd_cwnd += 1 / tp->snd_cwnd (or alternative w),
 * for every packet that was ACKed.
 */
void tcp_cong_avoid_ai(struct tcp_sock *tp, u32 w, u32 acked)
{
	/* If credits accumulated at a higher w, apply them gently now. */
	if (tp->snd_cwnd_cnt >= w) {
		tp->snd_cwnd_cnt = 0;
		tp->snd_cwnd++;
	}

	tp->snd_cwnd_cnt += acked;
	if (tp->snd_cwnd_cnt >= w) {
		u32 delta = tp->snd_cwnd_cnt / w;

		tp->snd_cwnd_cnt -= delta * w;
		tp->snd_cwnd += delta;
	}
	tp->snd_cwnd = min(tp->snd_cwnd, tp->snd_cwnd_clamp);
}
EXPORT_SYMBOL_GPL(tcp_cong_avoid_ai);

/*
 * TCP Reno congestion control
 * This is a special case used for fallback as well.
 */
/* This is Jacobson's slow start and congestion avoidance.
 * SIGCOMM '88, p. 328.
 */
void tcp_reno_cong_avoid(struct sock *sk, u32 ack, u32 acked)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if (!tcp_is_cwnd_limited(sk))
		return;

	/* In "safe" area, increase. */
	if (tcp_in_slow_start(tp)) {
		acked = tcp_slow_start(tp, acked);
		if (!acked)
			return;
	}
	/* In dangerous area, increase slowly. */
	tcp_cong_avoid_ai(tp, tp->snd_cwnd, acked);
}
EXPORT_SYMBOL_GPL(tcp_reno_cong_avoid);

/* Slow start threshold is half the congestion window (min 2) */
u32 tcp_reno_ssthresh(struct sock *sk)
{
	const struct tcp_sock *tp = tcp_sk(sk);

	return max(tp->snd_cwnd >> 1U, 2U);
}
EXPORT_SYMBOL_GPL(tcp_reno_ssthresh);

struct tcp_congestion_ops tcp_reno = {
	.flags		= TCP_CONG_NON_RESTRICTED,
	.name		= "reno",
	.owner		= THIS_MODULE,
	.ssthresh	= tcp_reno_ssthresh,
	.cong_avoid	= tcp_reno_cong_avoid,
};