v6.9.4
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Pluggable TCP congestion control support and newReno
 * congestion control.
 * Based on ideas from I/O scheduler support and Web100.
 *
 * Copyright (C) 2005 Stephen Hemminger <shemminger@osdl.org>
 */

#define pr_fmt(fmt) "TCP: " fmt

#include <linux/module.h>
#include <linux/mm.h>
#include <linux/types.h>
#include <linux/list.h>
#include <linux/gfp.h>
#include <linux/jhash.h>
#include <net/tcp.h>
#include <trace/events/tcp.h>

static DEFINE_SPINLOCK(tcp_cong_list_lock);
static LIST_HEAD(tcp_cong_list);

/* Simple linear search, don't expect many entries! */
struct tcp_congestion_ops *tcp_ca_find(const char *name)
{
	struct tcp_congestion_ops *e;

	list_for_each_entry_rcu(e, &tcp_cong_list, list) {
		if (strcmp(e->name, name) == 0)
			return e;
	}

	return NULL;
}

void tcp_set_ca_state(struct sock *sk, const u8 ca_state)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	trace_tcp_cong_state_set(sk, ca_state);

	if (icsk->icsk_ca_ops->set_state)
		icsk->icsk_ca_ops->set_state(sk, ca_state);
	icsk->icsk_ca_state = ca_state;
}

/* Must be called with rcu lock held */
static struct tcp_congestion_ops *tcp_ca_find_autoload(struct net *net,
						       const char *name)
{
	struct tcp_congestion_ops *ca = tcp_ca_find(name);

#ifdef CONFIG_MODULES
	if (!ca && capable(CAP_NET_ADMIN)) {
		rcu_read_unlock();
		request_module("tcp_%s", name);
		rcu_read_lock();
		ca = tcp_ca_find(name);
	}
#endif
	return ca;
}

/* Simple linear search, not much in here. */
struct tcp_congestion_ops *tcp_ca_find_key(u32 key)
{
	struct tcp_congestion_ops *e;

	list_for_each_entry_rcu(e, &tcp_cong_list, list) {
		if (e->key == key)
			return e;
	}

	return NULL;
}

int tcp_validate_congestion_control(struct tcp_congestion_ops *ca)
{
	/* all algorithms must implement these */
	if (!ca->ssthresh || !ca->undo_cwnd ||
	    !(ca->cong_avoid || ca->cong_control)) {
		pr_err("%s does not implement required ops\n", ca->name);
		return -EINVAL;
	}

	return 0;
}
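/* Illustrative sketch (not part of the original file): the smallest ops
 * table that would satisfy tcp_validate_congestion_control() above --
 * ssthresh, undo_cwnd, and one of cong_avoid/cong_control.  The name
 * "tcp_toy" is hypothetical; the reno helpers are the real ones defined
 * later in this file and declared in <net/tcp.h>.
 */
static struct tcp_congestion_ops tcp_toy = {
	.name		= "toy",
	.owner		= THIS_MODULE,
	.ssthresh	= tcp_reno_ssthresh,	/* halve cwnd on loss */
	.cong_avoid	= tcp_reno_cong_avoid,	/* slow start + additive increase */
	.undo_cwnd	= tcp_reno_undo_cwnd,	/* restore cwnd after spurious loss */
};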

/* Attach new congestion control algorithm to the list
 * of available options.
 */
int tcp_register_congestion_control(struct tcp_congestion_ops *ca)
{
	int ret;

	ret = tcp_validate_congestion_control(ca);
	if (ret)
		return ret;

	ca->key = jhash(ca->name, sizeof(ca->name), strlen(ca->name));

	spin_lock(&tcp_cong_list_lock);
	if (ca->key == TCP_CA_UNSPEC || tcp_ca_find_key(ca->key)) {
		pr_notice("%s already registered or non-unique key\n",
			  ca->name);
		ret = -EEXIST;
	} else {
		list_add_tail_rcu(&ca->list, &tcp_cong_list);
		pr_debug("%s registered\n", ca->name);
	}
	spin_unlock(&tcp_cong_list_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(tcp_register_congestion_control);

/*
 * Remove congestion control algorithm, called from
 * the module's remove function.  Module ref counts are used
 * to ensure that this can't be done till all sockets using
 * that method are closed.
 */
void tcp_unregister_congestion_control(struct tcp_congestion_ops *ca)
{
	spin_lock(&tcp_cong_list_lock);
	list_del_rcu(&ca->list);
	spin_unlock(&tcp_cong_list_lock);

	/* Wait for outstanding readers to complete before the
	 * module gets removed entirely.
	 *
	 * A try_module_get() should fail by now, as our module is
	 * in "going" state: no refs are held anymore and the
	 * module_exit() handler is being called.
	 */
	synchronize_rcu();
}
EXPORT_SYMBOL_GPL(tcp_unregister_congestion_control);
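/* Example module glue (hypothetical, continuing the tcp_toy sketch above):
 * a loadable algorithm pairs register/unregister in its init/exit handlers,
 * so module refcounts plus the synchronize_rcu() above make unload safe.
 * Real in-tree modules such as tcp_cubic.c follow this same pattern.
 */
static int __init tcp_toy_module_init(void)
{
	return tcp_register_congestion_control(&tcp_toy);
}

static void __exit tcp_toy_module_exit(void)
{
	tcp_unregister_congestion_control(&tcp_toy);
}

module_init(tcp_toy_module_init);
module_exit(tcp_toy_module_exit);
MODULE_LICENSE("GPL");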

/* Replace a registered old ca with a new one.
 *
 * The new ca must have the same name as the registered old one.
 */
int tcp_update_congestion_control(struct tcp_congestion_ops *ca, struct tcp_congestion_ops *old_ca)
{
	struct tcp_congestion_ops *existing;
	int ret = 0;

	ca->key = jhash(ca->name, sizeof(ca->name), strlen(ca->name));

	spin_lock(&tcp_cong_list_lock);
	existing = tcp_ca_find_key(old_ca->key);
	if (ca->key == TCP_CA_UNSPEC || !existing || strcmp(existing->name, ca->name)) {
		pr_notice("%s not registered or non-unique key\n",
			  ca->name);
		ret = -EINVAL;
	} else if (existing != old_ca) {
		pr_notice("invalid old congestion control algorithm to replace\n");
		ret = -EINVAL;
	} else {
		/* Add the new one before removing the old one to keep
		 * one implementation available all the time.
		 */
		list_add_tail_rcu(&ca->list, &tcp_cong_list);
		list_del_rcu(&existing->list);
		pr_debug("%s updated\n", ca->name);
	}
	spin_unlock(&tcp_cong_list_lock);

	/* Wait for outstanding readers to complete before the
	 * module or struct_ops gets removed entirely.
	 */
	if (!ret)
		synchronize_rcu();

	return ret;
}

u32 tcp_ca_get_key_by_name(struct net *net, const char *name, bool *ecn_ca)
{
	const struct tcp_congestion_ops *ca;
	u32 key = TCP_CA_UNSPEC;

	might_sleep();

	rcu_read_lock();
	ca = tcp_ca_find_autoload(net, name);
	if (ca) {
		key = ca->key;
		*ecn_ca = ca->flags & TCP_CONG_NEEDS_ECN;
	}
	rcu_read_unlock();

	return key;
}

char *tcp_ca_get_name_by_key(u32 key, char *buffer)
{
	const struct tcp_congestion_ops *ca;
	char *ret = NULL;

	rcu_read_lock();
	ca = tcp_ca_find_key(key);
	if (ca)
		ret = strncpy(buffer, ca->name,
			      TCP_CA_NAME_MAX);
	rcu_read_unlock();

	return ret;
}

/* Assign choice of congestion control. */
void tcp_assign_congestion_control(struct sock *sk)
{
	struct net *net = sock_net(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);
	const struct tcp_congestion_ops *ca;

	rcu_read_lock();
	ca = rcu_dereference(net->ipv4.tcp_congestion_control);
	if (unlikely(!bpf_try_module_get(ca, ca->owner)))
		ca = &tcp_reno;
	icsk->icsk_ca_ops = ca;
	rcu_read_unlock();

	memset(icsk->icsk_ca_priv, 0, sizeof(icsk->icsk_ca_priv));
	if (ca->flags & TCP_CONG_NEEDS_ECN)
		INET_ECN_xmit(sk);
	else
		INET_ECN_dontxmit(sk);
}

void tcp_init_congestion_control(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	tcp_sk(sk)->prior_ssthresh = 0;
	if (icsk->icsk_ca_ops->init)
		icsk->icsk_ca_ops->init(sk);
	if (tcp_ca_needs_ecn(sk))
		INET_ECN_xmit(sk);
	else
		INET_ECN_dontxmit(sk);
	icsk->icsk_ca_initialized = 1;
}

static void tcp_reinit_congestion_control(struct sock *sk,
					  const struct tcp_congestion_ops *ca)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	tcp_cleanup_congestion_control(sk);
	icsk->icsk_ca_ops = ca;
	icsk->icsk_ca_setsockopt = 1;
	memset(icsk->icsk_ca_priv, 0, sizeof(icsk->icsk_ca_priv));

	if (ca->flags & TCP_CONG_NEEDS_ECN)
		INET_ECN_xmit(sk);
	else
		INET_ECN_dontxmit(sk);

	if (!((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN)))
		tcp_init_congestion_control(sk);
}

/* Manage refcounts on socket close. */
void tcp_cleanup_congestion_control(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	if (icsk->icsk_ca_ops->release)
		icsk->icsk_ca_ops->release(sk);
	bpf_module_put(icsk->icsk_ca_ops, icsk->icsk_ca_ops->owner);
}

/* Used by sysctl to change default congestion control */
int tcp_set_default_congestion_control(struct net *net, const char *name)
{
	struct tcp_congestion_ops *ca;
	const struct tcp_congestion_ops *prev;
	int ret;

	rcu_read_lock();
	ca = tcp_ca_find_autoload(net, name);
	if (!ca) {
		ret = -ENOENT;
	} else if (!bpf_try_module_get(ca, ca->owner)) {
		ret = -EBUSY;
	} else if (!net_eq(net, &init_net) &&
			!(ca->flags & TCP_CONG_NON_RESTRICTED)) {
		/* Only init netns can set default to a restricted algorithm */
		ret = -EPERM;
	} else {
		prev = xchg(&net->ipv4.tcp_congestion_control, ca);
		if (prev)
			bpf_module_put(prev, prev->owner);

		ca->flags |= TCP_CONG_NON_RESTRICTED;
		ret = 0;
	}
	rcu_read_unlock();

	return ret;
}
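/* Typical administrative path into the function above (illustrative):
 *
 *	# sysctl net.ipv4.tcp_congestion_control=cubic
 *
 * which may autoload tcp_cubic.ko via tcp_ca_find_autoload() and then
 * makes "cubic" the per-netns default for new sockets.
 */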

/* Set default value from kernel configuration at bootup */
static int __init tcp_congestion_default(void)
{
	return tcp_set_default_congestion_control(&init_net,
						  CONFIG_DEFAULT_TCP_CONG);
}
late_initcall(tcp_congestion_default);

/* Build string with list of available congestion control values */
void tcp_get_available_congestion_control(char *buf, size_t maxlen)
{
	struct tcp_congestion_ops *ca;
	size_t offs = 0;

	rcu_read_lock();
	list_for_each_entry_rcu(ca, &tcp_cong_list, list) {
		offs += snprintf(buf + offs, maxlen - offs,
				 "%s%s",
				 offs == 0 ? "" : " ", ca->name);

		if (WARN_ON_ONCE(offs >= maxlen))
			break;
	}
	rcu_read_unlock();
}

/* Get current default congestion control */
void tcp_get_default_congestion_control(struct net *net, char *name)
{
	const struct tcp_congestion_ops *ca;

	rcu_read_lock();
	ca = rcu_dereference(net->ipv4.tcp_congestion_control);
	strncpy(name, ca->name, TCP_CA_NAME_MAX);
	rcu_read_unlock();
}

/* Build list of non-restricted congestion control values */
void tcp_get_allowed_congestion_control(char *buf, size_t maxlen)
{
	struct tcp_congestion_ops *ca;
	size_t offs = 0;

	*buf = '\0';
	rcu_read_lock();
	list_for_each_entry_rcu(ca, &tcp_cong_list, list) {
		if (!(ca->flags & TCP_CONG_NON_RESTRICTED))
			continue;
		offs += snprintf(buf + offs, maxlen - offs,
				 "%s%s",
				 offs == 0 ? "" : " ", ca->name);

		if (WARN_ON_ONCE(offs >= maxlen))
			break;
	}
	rcu_read_unlock();
}

/* Change list of non-restricted congestion control */
int tcp_set_allowed_congestion_control(char *val)
{
	struct tcp_congestion_ops *ca;
	char *saved_clone, *clone, *name;
	int ret = 0;

	saved_clone = clone = kstrdup(val, GFP_USER);
	if (!clone)
		return -ENOMEM;

	spin_lock(&tcp_cong_list_lock);
	/* pass 1 check for bad entries */
	while ((name = strsep(&clone, " ")) && *name) {
		ca = tcp_ca_find(name);
		if (!ca) {
			ret = -ENOENT;
			goto out;
		}
	}

	/* pass 2 clear old values */
	list_for_each_entry_rcu(ca, &tcp_cong_list, list)
		ca->flags &= ~TCP_CONG_NON_RESTRICTED;

	/* pass 3 mark as allowed */
	while ((name = strsep(&val, " ")) && *name) {
		ca = tcp_ca_find(name);
		WARN_ON(!ca);
		if (ca)
			ca->flags |= TCP_CONG_NON_RESTRICTED;
	}
out:
	spin_unlock(&tcp_cong_list_lock);
	kfree(saved_clone);

	return ret;
}
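/* Illustrative counterpart in userspace: the three-pass update above is
 * driven by writing a space-separated list, e.g.
 *
 *	# sysctl net.ipv4.tcp_allowed_congestion_control="reno cubic"
 *
 * after which only the listed algorithms (plus whatever a CAP_NET_ADMIN
 * caller requests) can be selected via TCP_CONGESTION.
 */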

/* Change congestion control for socket. If load is false, then it is the
 * responsibility of the caller to call tcp_init_congestion_control or
 * tcp_reinit_congestion_control (if the current congestion control was
 * already initialized).
 */
int tcp_set_congestion_control(struct sock *sk, const char *name, bool load,
			       bool cap_net_admin)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	const struct tcp_congestion_ops *ca;
	int err = 0;

	if (icsk->icsk_ca_dst_locked)
		return -EPERM;

	rcu_read_lock();
	if (!load)
		ca = tcp_ca_find(name);
	else
		ca = tcp_ca_find_autoload(sock_net(sk), name);

	/* No change: asking for the existing value */
	if (ca == icsk->icsk_ca_ops) {
		icsk->icsk_ca_setsockopt = 1;
		goto out;
	}

	if (!ca)
		err = -ENOENT;
	else if (!((ca->flags & TCP_CONG_NON_RESTRICTED) || cap_net_admin))
		err = -EPERM;
	else if (!bpf_try_module_get(ca, ca->owner))
		err = -EBUSY;
	else
		tcp_reinit_congestion_control(sk, ca);
 out:
	rcu_read_unlock();
	return err;
}
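/* A minimal userspace sketch of the setsockopt() path that lands here
 * (illustrative; error handling trimmed):
 *
 *	#include <string.h>
 *	#include <netinet/in.h>
 *	#include <netinet/tcp.h>
 *
 *	int set_cc(int fd, const char *name)
 *	{
 *		return setsockopt(fd, IPPROTO_TCP, TCP_CONGESTION,
 *				  name, strlen(name));
 *	}
 *
 * Non-restricted algorithms are selectable by any user; others need
 * CAP_NET_ADMIN, matching the -EPERM branch above.
 */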

/* Slow start is used when congestion window is no greater than the slow start
 * threshold. It is based on RFC2581 and also handles stretch ACKs properly.
 * We do not implement RFC3465 Appropriate Byte Counting (ABC) per se but
 * something better;) a packet is only considered (s)acked in its entirety to
 * defend against the ACK attacks described in the RFC. Slow start processes
 * a stretch ACK of degree N as if N acks of degree 1 are received back to
 * back except ABC caps N to 2. Slow start exits when cwnd grows over ssthresh
 * and returns the leftover acks to adjust cwnd in congestion avoidance mode.
 */
__bpf_kfunc u32 tcp_slow_start(struct tcp_sock *tp, u32 acked)
{
	u32 cwnd = min(tcp_snd_cwnd(tp) + acked, tp->snd_ssthresh);

	acked -= cwnd - tcp_snd_cwnd(tp);
	tcp_snd_cwnd_set(tp, min(cwnd, tp->snd_cwnd_clamp));

	return acked;
}
EXPORT_SYMBOL_GPL(tcp_slow_start);
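/* Worked example: with tcp_snd_cwnd(tp) == 8, snd_ssthresh == 10 and
 * acked == 5, cwnd is capped at min(8 + 5, 10) == 10, so only 2 of the
 * 5 acks are consumed here; the remaining 3 are returned so the caller
 * can feed them into congestion avoidance (see tcp_reno_cong_avoid()).
 */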

/* In theory this is tp->snd_cwnd += 1 / tp->snd_cwnd (or alternative w),
 * for every packet that was ACKed.
 */
__bpf_kfunc void tcp_cong_avoid_ai(struct tcp_sock *tp, u32 w, u32 acked)
{
	/* If credits accumulated at a higher w, apply them gently now. */
	if (tp->snd_cwnd_cnt >= w) {
		tp->snd_cwnd_cnt = 0;
		tcp_snd_cwnd_set(tp, tcp_snd_cwnd(tp) + 1);
	}

	tp->snd_cwnd_cnt += acked;
	if (tp->snd_cwnd_cnt >= w) {
		u32 delta = tp->snd_cwnd_cnt / w;

		tp->snd_cwnd_cnt -= delta * w;
		tcp_snd_cwnd_set(tp, tcp_snd_cwnd(tp) + delta);
	}
	tcp_snd_cwnd_set(tp, min(tcp_snd_cwnd(tp), tp->snd_cwnd_clamp));
}
EXPORT_SYMBOL_GPL(tcp_cong_avoid_ai);
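/* Worked example: with w == 10, snd_cwnd_cnt == 0 and acked == 25, the
 * counter reaches 25, delta == 25 / 10 == 2, so cwnd grows by 2 and
 * snd_cwnd_cnt keeps the remainder 5 as credit toward the next increase.
 * When w == cwnd this averages out to the classic +1 segment per RTT.
 */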

/*
 * TCP Reno congestion control
 * This is a special case used for fallback as well.
 */
/* This is Jacobson's slow start and congestion avoidance.
 * SIGCOMM '88, p. 328.
 */
__bpf_kfunc void tcp_reno_cong_avoid(struct sock *sk, u32 ack, u32 acked)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if (!tcp_is_cwnd_limited(sk))
		return;

	/* In "safe" area, increase. */
	if (tcp_in_slow_start(tp)) {
		acked = tcp_slow_start(tp, acked);
		if (!acked)
			return;
	}
	/* In dangerous area, increase slowly. */
	tcp_cong_avoid_ai(tp, tcp_snd_cwnd(tp), acked);
}
EXPORT_SYMBOL_GPL(tcp_reno_cong_avoid);
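/* Note: passing w == tcp_snd_cwnd(tp) to tcp_cong_avoid_ai() means a full
 * window of acks must arrive before cwnd grows by one segment -- the
 * additive-increase half of Reno's AIMD, roughly +1 MSS per RTT.
 */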

/* Slow start threshold is half the congestion window (min 2) */
__bpf_kfunc u32 tcp_reno_ssthresh(struct sock *sk)
{
	const struct tcp_sock *tp = tcp_sk(sk);

	return max(tcp_snd_cwnd(tp) >> 1U, 2U);
}
EXPORT_SYMBOL_GPL(tcp_reno_ssthresh);

__bpf_kfunc u32 tcp_reno_undo_cwnd(struct sock *sk)
{
	const struct tcp_sock *tp = tcp_sk(sk);

	return max(tcp_snd_cwnd(tp), tp->prior_cwnd);
}
EXPORT_SYMBOL_GPL(tcp_reno_undo_cwnd);

struct tcp_congestion_ops tcp_reno = {
	.flags		= TCP_CONG_NON_RESTRICTED,
	.name		= "reno",
	.owner		= THIS_MODULE,
	.ssthresh	= tcp_reno_ssthresh,
	.cong_avoid	= tcp_reno_cong_avoid,
	.undo_cwnd	= tcp_reno_undo_cwnd,
};
v3.1

/*
 * Pluggable TCP congestion control support and newReno
 * congestion control.
 * Based on ideas from I/O scheduler support and Web100.
 *
 * Copyright (C) 2005 Stephen Hemminger <shemminger@osdl.org>
 */

#include <linux/module.h>
#include <linux/mm.h>
#include <linux/types.h>
#include <linux/list.h>
#include <linux/gfp.h>
#include <net/tcp.h>

int sysctl_tcp_max_ssthresh = 0;

static DEFINE_SPINLOCK(tcp_cong_list_lock);
static LIST_HEAD(tcp_cong_list);

/* Simple linear search, don't expect many entries! */
static struct tcp_congestion_ops *tcp_ca_find(const char *name)
{
	struct tcp_congestion_ops *e;

	list_for_each_entry_rcu(e, &tcp_cong_list, list) {
		if (strcmp(e->name, name) == 0)
			return e;
	}

	return NULL;
}

/*
 * Attach new congestion control algorithm to the list
 * of available options.
 */
int tcp_register_congestion_control(struct tcp_congestion_ops *ca)
{
	int ret = 0;

	/* all algorithms must implement ssthresh and cong_avoid ops */
	if (!ca->ssthresh || !ca->cong_avoid) {
		printk(KERN_ERR "TCP %s does not implement required ops\n",
		       ca->name);
		return -EINVAL;
	}

	spin_lock(&tcp_cong_list_lock);
	if (tcp_ca_find(ca->name)) {
		printk(KERN_NOTICE "TCP %s already registered\n", ca->name);
		ret = -EEXIST;
	} else {
		list_add_tail_rcu(&ca->list, &tcp_cong_list);
		printk(KERN_INFO "TCP %s registered\n", ca->name);
	}
	spin_unlock(&tcp_cong_list_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(tcp_register_congestion_control);

/*
 * Remove congestion control algorithm, called from
 * the module's remove function.  Module ref counts are used
 * to ensure that this can't be done till all sockets using
 * that method are closed.
 */
void tcp_unregister_congestion_control(struct tcp_congestion_ops *ca)
{
	spin_lock(&tcp_cong_list_lock);
	list_del_rcu(&ca->list);
	spin_unlock(&tcp_cong_list_lock);
}
EXPORT_SYMBOL_GPL(tcp_unregister_congestion_control);

/* Assign choice of congestion control. */
void tcp_init_congestion_control(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct tcp_congestion_ops *ca;

	/* if no choice made yet assign the current value set as default */
	if (icsk->icsk_ca_ops == &tcp_init_congestion_ops) {
		rcu_read_lock();
		list_for_each_entry_rcu(ca, &tcp_cong_list, list) {
			if (try_module_get(ca->owner)) {
				icsk->icsk_ca_ops = ca;
				break;
			}

			/* fallback to next available */
		}
		rcu_read_unlock();
	}

	if (icsk->icsk_ca_ops->init)
		icsk->icsk_ca_ops->init(sk);
}

/* Manage refcounts on socket close. */
void tcp_cleanup_congestion_control(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	if (icsk->icsk_ca_ops->release)
		icsk->icsk_ca_ops->release(sk);
	module_put(icsk->icsk_ca_ops->owner);
}

/* Used by sysctl to change default congestion control */
int tcp_set_default_congestion_control(const char *name)
{
	struct tcp_congestion_ops *ca;
	int ret = -ENOENT;

	spin_lock(&tcp_cong_list_lock);
	ca = tcp_ca_find(name);
#ifdef CONFIG_MODULES
	if (!ca && capable(CAP_NET_ADMIN)) {
		spin_unlock(&tcp_cong_list_lock);

		request_module("tcp_%s", name);
		spin_lock(&tcp_cong_list_lock);
		ca = tcp_ca_find(name);
	}
#endif

	if (ca) {
		ca->flags |= TCP_CONG_NON_RESTRICTED;	/* default is always allowed */
		list_move(&ca->list, &tcp_cong_list);
		ret = 0;
	}
	spin_unlock(&tcp_cong_list_lock);

	return ret;
}

/* Set default value from kernel configuration at bootup */
static int __init tcp_congestion_default(void)
{
	return tcp_set_default_congestion_control(CONFIG_DEFAULT_TCP_CONG);
}
late_initcall(tcp_congestion_default);


/* Build string with list of available congestion control values */
void tcp_get_available_congestion_control(char *buf, size_t maxlen)
{
	struct tcp_congestion_ops *ca;
	size_t offs = 0;

	rcu_read_lock();
	list_for_each_entry_rcu(ca, &tcp_cong_list, list) {
		offs += snprintf(buf + offs, maxlen - offs,
				 "%s%s",
				 offs == 0 ? "" : " ", ca->name);
	}
	rcu_read_unlock();
}

/* Get current default congestion control */
void tcp_get_default_congestion_control(char *name)
{
	struct tcp_congestion_ops *ca;
	/* We will always have reno... */
	BUG_ON(list_empty(&tcp_cong_list));

	rcu_read_lock();
	ca = list_entry(tcp_cong_list.next, struct tcp_congestion_ops, list);
	strncpy(name, ca->name, TCP_CA_NAME_MAX);
	rcu_read_unlock();
}

/* Build list of non-restricted congestion control values */
void tcp_get_allowed_congestion_control(char *buf, size_t maxlen)
{
	struct tcp_congestion_ops *ca;
	size_t offs = 0;

	*buf = '\0';
	rcu_read_lock();
	list_for_each_entry_rcu(ca, &tcp_cong_list, list) {
		if (!(ca->flags & TCP_CONG_NON_RESTRICTED))
			continue;
		offs += snprintf(buf + offs, maxlen - offs,
				 "%s%s",
				 offs == 0 ? "" : " ", ca->name);
	}
	rcu_read_unlock();
}

/* Change list of non-restricted congestion control */
int tcp_set_allowed_congestion_control(char *val)
{
	struct tcp_congestion_ops *ca;
	char *saved_clone, *clone, *name;
	int ret = 0;

	saved_clone = clone = kstrdup(val, GFP_USER);
	if (!clone)
		return -ENOMEM;

	spin_lock(&tcp_cong_list_lock);
	/* pass 1 check for bad entries */
	while ((name = strsep(&clone, " ")) && *name) {
		ca = tcp_ca_find(name);
		if (!ca) {
			ret = -ENOENT;
			goto out;
		}
	}

	/* pass 2 clear old values */
	list_for_each_entry_rcu(ca, &tcp_cong_list, list)
		ca->flags &= ~TCP_CONG_NON_RESTRICTED;

	/* pass 3 mark as allowed */
	while ((name = strsep(&val, " ")) && *name) {
		ca = tcp_ca_find(name);
		WARN_ON(!ca);
		if (ca)
			ca->flags |= TCP_CONG_NON_RESTRICTED;
	}
out:
	spin_unlock(&tcp_cong_list_lock);
	kfree(saved_clone);

	return ret;
}


/* Change congestion control for socket */
int tcp_set_congestion_control(struct sock *sk, const char *name)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct tcp_congestion_ops *ca;
	int err = 0;

	rcu_read_lock();
	ca = tcp_ca_find(name);

	/* no change: asking for the existing value */
	if (ca == icsk->icsk_ca_ops)
		goto out;

#ifdef CONFIG_MODULES
	/* not found, attempt to autoload the module */
	if (!ca && capable(CAP_NET_ADMIN)) {
		rcu_read_unlock();
		request_module("tcp_%s", name);
		rcu_read_lock();
		ca = tcp_ca_find(name);
	}
#endif
	if (!ca)
		err = -ENOENT;

	else if (!((ca->flags & TCP_CONG_NON_RESTRICTED) || capable(CAP_NET_ADMIN)))
		err = -EPERM;

	else if (!try_module_get(ca->owner))
		err = -EBUSY;

	else {
		tcp_cleanup_congestion_control(sk);
		icsk->icsk_ca_ops = ca;

		if (sk->sk_state != TCP_CLOSE && icsk->icsk_ca_ops->init)
			icsk->icsk_ca_ops->init(sk);
	}
 out:
	rcu_read_unlock();
	return err;
}

/* RFC2861 Check whether we are limited by application or congestion window
 * This is the inverse of the cwnd check in tcp_tso_should_defer
 */
int tcp_is_cwnd_limited(const struct sock *sk, u32 in_flight)
{
	const struct tcp_sock *tp = tcp_sk(sk);
	u32 left;

	if (in_flight >= tp->snd_cwnd)
		return 1;

	left = tp->snd_cwnd - in_flight;
	if (sk_can_gso(sk) &&
	    left * sysctl_tcp_tso_win_divisor < tp->snd_cwnd &&
	    left * tp->mss_cache < sk->sk_gso_max_size)
		return 1;
	return left <= tcp_max_burst(tp);
}
EXPORT_SYMBOL_GPL(tcp_is_cwnd_limited);
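/* Worked example: with snd_cwnd == 10 and in_flight == 4 on a non-GSO
 * socket, left == 6, so the connection counts as cwnd-limited only if
 * 6 <= tcp_max_burst(tp) (commonly 3 segments).  Here it is treated as
 * application-limited instead, and cwnd growth is skipped rather than
 * inflating a window the application cannot fill.
 */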

/*
 * Slow start is used when congestion window is less than slow start
 * threshold. This version implements the basic RFC2581 version
 * and optionally supports:
 * 	RFC3742 Limited Slow Start  	  - growth limited to max_ssthresh
 *	RFC3465 Appropriate Byte Counting - growth limited by bytes acknowledged
 */
void tcp_slow_start(struct tcp_sock *tp)
{
	int cnt; /* increase in packets */

	/* RFC3465: ABC Slow start
	 * Increase only after a full MSS of bytes is acked
	 *
	 * TCP sender SHOULD increase cwnd by the number of
	 * previously unacknowledged bytes ACKed by each incoming
	 * acknowledgment, provided the increase is not more than L
	 */
	if (sysctl_tcp_abc && tp->bytes_acked < tp->mss_cache)
		return;

	if (sysctl_tcp_max_ssthresh > 0 && tp->snd_cwnd > sysctl_tcp_max_ssthresh)
		cnt = sysctl_tcp_max_ssthresh >> 1;	/* limited slow start */
	else
		cnt = tp->snd_cwnd;			/* exponential increase */

	/* RFC3465: ABC
	 * We MAY increase by 2 if discovered delayed ack
	 */
	if (sysctl_tcp_abc > 1 && tp->bytes_acked >= 2*tp->mss_cache)
		cnt <<= 1;
	tp->bytes_acked = 0;

	tp->snd_cwnd_cnt += cnt;
	while (tp->snd_cwnd_cnt >= tp->snd_cwnd) {
		tp->snd_cwnd_cnt -= tp->snd_cwnd;
		if (tp->snd_cwnd < tp->snd_cwnd_clamp)
			tp->snd_cwnd++;
	}
}
EXPORT_SYMBOL_GPL(tcp_slow_start);
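/* Worked example: with the sysctls at their defaults (tcp_abc == 0,
 * tcp_max_ssthresh == 0) and snd_cwnd == 10, each ACK adds cnt == 10 to
 * snd_cwnd_cnt, which immediately wraps and bumps snd_cwnd by one -- an
 * increment per ACK, i.e. the usual doubling of cwnd per RTT.  With
 * tcp_max_ssthresh == 100 and snd_cwnd == 200, cnt drops to 50, so only
 * every fourth ACK grows cwnd (RFC3742 limited slow start).
 */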

/* In theory this is tp->snd_cwnd += 1 / tp->snd_cwnd (or alternative w) */
void tcp_cong_avoid_ai(struct tcp_sock *tp, u32 w)
{
	if (tp->snd_cwnd_cnt >= w) {
		if (tp->snd_cwnd < tp->snd_cwnd_clamp)
			tp->snd_cwnd++;
		tp->snd_cwnd_cnt = 0;
	} else {
		tp->snd_cwnd_cnt++;
	}
}
EXPORT_SYMBOL_GPL(tcp_cong_avoid_ai);

/*
 * TCP Reno congestion control
 * This is a special case used for fallback as well.
 */
/* This is Jacobson's slow start and congestion avoidance.
 * SIGCOMM '88, p. 328.
 */
void tcp_reno_cong_avoid(struct sock *sk, u32 ack, u32 in_flight)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if (!tcp_is_cwnd_limited(sk, in_flight))
		return;

	/* In "safe" area, increase. */
	if (tp->snd_cwnd <= tp->snd_ssthresh)
		tcp_slow_start(tp);

	/* In dangerous area, increase slowly. */
	else if (sysctl_tcp_abc) {
		/* RFC3465: Appropriate Byte Count
		 * increase once for each full cwnd acked
		 */
		if (tp->bytes_acked >= tp->snd_cwnd*tp->mss_cache) {
			tp->bytes_acked -= tp->snd_cwnd*tp->mss_cache;
			if (tp->snd_cwnd < tp->snd_cwnd_clamp)
				tp->snd_cwnd++;
		}
	} else {
		tcp_cong_avoid_ai(tp, tp->snd_cwnd);
	}
}
EXPORT_SYMBOL_GPL(tcp_reno_cong_avoid);
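/* Worked example of the ABC branch: with snd_cwnd == 10 and
 * mss_cache == 1448, cwnd grows by one only once bytes_acked reaches
 * 10 * 1448 == 14480 bytes -- a full window of data -- mirroring the
 * once-per-window counting that tcp_cong_avoid_ai() does in packets.
 */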

/* Slow start threshold is half the congestion window (min 2) */
u32 tcp_reno_ssthresh(struct sock *sk)
{
	const struct tcp_sock *tp = tcp_sk(sk);
	return max(tp->snd_cwnd >> 1U, 2U);
}
EXPORT_SYMBOL_GPL(tcp_reno_ssthresh);

/* Lower bound on congestion window with halving. */
u32 tcp_reno_min_cwnd(const struct sock *sk)
{
	const struct tcp_sock *tp = tcp_sk(sk);
	return tp->snd_ssthresh/2;
}
EXPORT_SYMBOL_GPL(tcp_reno_min_cwnd);

struct tcp_congestion_ops tcp_reno = {
	.flags		= TCP_CONG_NON_RESTRICTED,
	.name		= "reno",
	.owner		= THIS_MODULE,
	.ssthresh	= tcp_reno_ssthresh,
	.cong_avoid	= tcp_reno_cong_avoid,
	.min_cwnd	= tcp_reno_min_cwnd,
};

/* Initial congestion control used (until SYN); really reno under another
 * name so we can tell the difference during
 * tcp_set_default_congestion_control().
 */
struct tcp_congestion_ops tcp_init_congestion_ops  = {
	.name		= "",
	.owner		= THIS_MODULE,
	.ssthresh	= tcp_reno_ssthresh,
	.cong_avoid	= tcp_reno_cong_avoid,
	.min_cwnd	= tcp_reno_min_cwnd,
};
EXPORT_SYMBOL_GPL(tcp_init_congestion_ops);