Linux Audio

Check our new training course

Linux kernel drivers training

May 6-19, 2025
Register
Loading...
v6.8
  1// SPDX-License-Identifier: GPL-2.0
  2/* Multipath TCP
  3 *
  4 * Copyright (c) 2022, SUSE.
  5 */
  6
  7#define pr_fmt(fmt) "MPTCP: " fmt
  8
  9#include <linux/kernel.h>
 10#include <linux/module.h>
 11#include <linux/list.h>
 12#include <linux/rculist.h>
 13#include <linux/spinlock.h>
 14#include "protocol.h"
 15
 16static DEFINE_SPINLOCK(mptcp_sched_list_lock);
 17static LIST_HEAD(mptcp_sched_list);
 18
 19static int mptcp_sched_default_get_subflow(struct mptcp_sock *msk,
 20					   struct mptcp_sched_data *data)
 21{
 22	struct sock *ssk;
 23
 24	ssk = data->reinject ? mptcp_subflow_get_retrans(msk) :
 25			       mptcp_subflow_get_send(msk);
 26	if (!ssk)
 27		return -EINVAL;
 28
 29	mptcp_subflow_set_scheduled(mptcp_subflow_ctx(ssk), true);
 30	return 0;
 31}
 32
/* The built-in scheduler, always registered at init time and used when
 * no other scheduler has been attached to the msk.
 */
static struct mptcp_sched_ops mptcp_sched_default = {
	.get_subflow	= mptcp_sched_default_get_subflow,
	.name		= "default",
	.owner		= THIS_MODULE,
};
 38
 39/* Must be called with rcu read lock held */
 40struct mptcp_sched_ops *mptcp_sched_find(const char *name)
 41{
 42	struct mptcp_sched_ops *sched, *ret = NULL;
 43
 44	list_for_each_entry_rcu(sched, &mptcp_sched_list, list) {
 45		if (!strcmp(sched->name, name)) {
 46			ret = sched;
 47			break;
 48		}
 49	}
 50
 51	return ret;
 52}
 53
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 54int mptcp_register_scheduler(struct mptcp_sched_ops *sched)
 55{
 56	if (!sched->get_subflow)
 57		return -EINVAL;
 58
 59	spin_lock(&mptcp_sched_list_lock);
 60	if (mptcp_sched_find(sched->name)) {
 61		spin_unlock(&mptcp_sched_list_lock);
 62		return -EEXIST;
 63	}
 64	list_add_tail_rcu(&sched->list, &mptcp_sched_list);
 65	spin_unlock(&mptcp_sched_list_lock);
 66
 67	pr_debug("%s registered", sched->name);
 68	return 0;
 69}
 70
/* Remove @sched from the global list; the built-in default scheduler
 * can never be unregistered.  RCU readers traversing the list may
 * still observe the entry until a grace period has elapsed.
 */
void mptcp_unregister_scheduler(struct mptcp_sched_ops *sched)
{
	if (sched == &mptcp_sched_default)
		return;

	spin_lock(&mptcp_sched_list_lock);
	list_del_rcu(&sched->list);
	spin_unlock(&mptcp_sched_list_lock);
}
 80
/* Register the built-in default scheduler at MPTCP init time. */
void mptcp_sched_init(void)
{
	mptcp_register_scheduler(&mptcp_sched_default);
}
 85
 86int mptcp_init_sched(struct mptcp_sock *msk,
 87		     struct mptcp_sched_ops *sched)
 88{
 89	if (!sched)
 90		sched = &mptcp_sched_default;
 91
 92	if (!bpf_try_module_get(sched, sched->owner))
 93		return -EBUSY;
 94
 95	msk->sched = sched;
 96	if (msk->sched->init)
 97		msk->sched->init(msk);
 98
 99	pr_debug("sched=%s", msk->sched->name);
100
101	return 0;
102}
103
/* Detach the scheduler from @msk: clear the msk pointer first, then
 * invoke the optional release hook and drop the reference taken by
 * mptcp_init_sched().  No-op when no scheduler is attached.
 */
void mptcp_release_sched(struct mptcp_sock *msk)
{
	struct mptcp_sched_ops *sched = msk->sched;

	if (!sched)
		return;

	msk->sched = NULL;
	if (sched->release)
		sched->release(msk);

	bpf_module_put(sched, sched->owner);
}
117
/* Mark/unmark @subflow as picked for transmission; paired with the
 * READ_ONCE() annotations in mptcp_sched_get_{send,retrans}().
 */
void mptcp_subflow_set_scheduled(struct mptcp_subflow_context *subflow,
				 bool scheduled)
{
	WRITE_ONCE(subflow->scheduled, scheduled);
}
123
/* Ask the msk's scheduler (or the default one) to select and mark the
 * subflow(s) for the next transmission.  Returns 0 when at least one
 * subflow is scheduled, a negative error otherwise.
 */
int mptcp_sched_get_send(struct mptcp_sock *msk)
{
	struct mptcp_subflow_context *subflow;
	struct mptcp_sched_data data;

	msk_owned_by_me(msk);

	/* the following check is moved out of mptcp_subflow_get_send */
	if (__mptcp_check_fallback(msk)) {
		/* after fallback, only the first subflow can carry data */
		if (msk->first &&
		    __tcp_can_send(msk->first) &&
		    sk_stream_memory_free(msk->first)) {
			mptcp_subflow_set_scheduled(mptcp_subflow_ctx(msk->first), true);
			return 0;
		}
		return -EINVAL;
	}

	/* a subflow scheduled earlier is still pending: nothing to do */
	mptcp_for_each_subflow(msk, subflow) {
		if (READ_ONCE(subflow->scheduled))
			return 0;
	}

	data.reinject = false;
	if (msk->sched == &mptcp_sched_default || !msk->sched)
		return mptcp_sched_default_get_subflow(msk, &data);
	return msk->sched->get_subflow(msk, &data);
}
152
/* Ask the msk's scheduler (or the default one) to select and mark the
 * subflow(s) to be used for retransmissions.  Returns 0 when at least
 * one subflow is scheduled, a negative error otherwise.
 */
int mptcp_sched_get_retrans(struct mptcp_sock *msk)
{
	struct mptcp_subflow_context *subflow;
	struct mptcp_sched_data data;

	msk_owned_by_me(msk);

	/* the following check is moved out of mptcp_subflow_get_retrans */
	if (__mptcp_check_fallback(msk))
		return -EINVAL;

	/* a subflow scheduled earlier is still pending: nothing to do */
	mptcp_for_each_subflow(msk, subflow) {
		if (READ_ONCE(subflow->scheduled))
			return 0;
	}

	data.reinject = true;
	if (msk->sched == &mptcp_sched_default || !msk->sched)
		return mptcp_sched_default_get_subflow(msk, &data);
	return msk->sched->get_subflow(msk, &data);
}
v6.13.7
  1// SPDX-License-Identifier: GPL-2.0
  2/* Multipath TCP
  3 *
  4 * Copyright (c) 2022, SUSE.
  5 */
  6
  7#define pr_fmt(fmt) "MPTCP: " fmt
  8
  9#include <linux/kernel.h>
 10#include <linux/module.h>
 11#include <linux/list.h>
 12#include <linux/rculist.h>
 13#include <linux/spinlock.h>
 14#include "protocol.h"
 15
 16static DEFINE_SPINLOCK(mptcp_sched_list_lock);
 17static LIST_HEAD(mptcp_sched_list);
 18
 19static int mptcp_sched_default_get_subflow(struct mptcp_sock *msk,
 20					   struct mptcp_sched_data *data)
 21{
 22	struct sock *ssk;
 23
 24	ssk = data->reinject ? mptcp_subflow_get_retrans(msk) :
 25			       mptcp_subflow_get_send(msk);
 26	if (!ssk)
 27		return -EINVAL;
 28
 29	mptcp_subflow_set_scheduled(mptcp_subflow_ctx(ssk), true);
 30	return 0;
 31}
 32
/* The built-in scheduler, always registered at init time and used when
 * no other scheduler has been attached to the msk.
 */
static struct mptcp_sched_ops mptcp_sched_default = {
	.get_subflow	= mptcp_sched_default_get_subflow,
	.name		= "default",
	.owner		= THIS_MODULE,
};
 38
 39/* Must be called with rcu read lock held */
 40struct mptcp_sched_ops *mptcp_sched_find(const char *name)
 41{
 42	struct mptcp_sched_ops *sched, *ret = NULL;
 43
 44	list_for_each_entry_rcu(sched, &mptcp_sched_list, list) {
 45		if (!strcmp(sched->name, name)) {
 46			ret = sched;
 47			break;
 48		}
 49	}
 50
 51	return ret;
 52}
 53
/* Build string with list of available scheduler values.
 * Similar to tcp_get_available_congestion_control()
 */
void mptcp_get_available_schedulers(char *buf, size_t maxlen)
{
	struct mptcp_sched_ops *sched;
	size_t offs = 0;

	rcu_read_lock();
	list_for_each_entry_rcu(sched, &mptcp_sched_list, list) {
		/* space-separated names; snprintf NUL-terminates and
		 * returns the would-be length, so offs tracks the full
		 * (possibly truncated) output size
		 */
		offs += snprintf(buf + offs, maxlen - offs,
				 "%s%s",
				 offs == 0 ? "" : " ", sched->name);

		/* output truncated: buf too small for all the names */
		if (WARN_ON_ONCE(offs >= maxlen))
			break;
	}
	rcu_read_unlock();
}
 73
 74int mptcp_register_scheduler(struct mptcp_sched_ops *sched)
 75{
 76	if (!sched->get_subflow)
 77		return -EINVAL;
 78
 79	spin_lock(&mptcp_sched_list_lock);
 80	if (mptcp_sched_find(sched->name)) {
 81		spin_unlock(&mptcp_sched_list_lock);
 82		return -EEXIST;
 83	}
 84	list_add_tail_rcu(&sched->list, &mptcp_sched_list);
 85	spin_unlock(&mptcp_sched_list_lock);
 86
 87	pr_debug("%s registered\n", sched->name);
 88	return 0;
 89}
 90
/* Remove @sched from the global list; the built-in default scheduler
 * can never be unregistered.  RCU readers traversing the list may
 * still observe the entry until a grace period has elapsed.
 */
void mptcp_unregister_scheduler(struct mptcp_sched_ops *sched)
{
	if (sched == &mptcp_sched_default)
		return;

	spin_lock(&mptcp_sched_list_lock);
	list_del_rcu(&sched->list);
	spin_unlock(&mptcp_sched_list_lock);
}
100
/* Register the built-in default scheduler at MPTCP init time. */
void mptcp_sched_init(void)
{
	mptcp_register_scheduler(&mptcp_sched_default);
}
105
/* Attach @sched (or the built-in default when @sched is NULL) to @msk:
 * grab a reference on the scheduler's owner and invoke the optional
 * init hook.  Returns 0 on success, -EBUSY when the reference cannot
 * be acquired.
 */
int mptcp_init_sched(struct mptcp_sock *msk,
		     struct mptcp_sched_ops *sched)
{
	if (!sched)
		sched = &mptcp_sched_default;

	if (!bpf_try_module_get(sched, sched->owner))
		return -EBUSY;

	msk->sched = sched;
	if (msk->sched->init)
		msk->sched->init(msk);

	pr_debug("sched=%s\n", msk->sched->name);

	return 0;
}
123
/* Detach the scheduler from @msk: clear the msk pointer first, then
 * invoke the optional release hook and drop the reference taken by
 * mptcp_init_sched().  No-op when no scheduler is attached.
 */
void mptcp_release_sched(struct mptcp_sock *msk)
{
	struct mptcp_sched_ops *sched = msk->sched;

	if (!sched)
		return;

	msk->sched = NULL;
	if (sched->release)
		sched->release(msk);

	bpf_module_put(sched, sched->owner);
}
137
/* Mark/unmark @subflow as picked for transmission; paired with the
 * READ_ONCE() annotations in mptcp_sched_get_{send,retrans}().
 */
void mptcp_subflow_set_scheduled(struct mptcp_subflow_context *subflow,
				 bool scheduled)
{
	WRITE_ONCE(subflow->scheduled, scheduled);
}
143
/* Ask the msk's scheduler (or the default one) to select and mark the
 * subflow(s) for the next transmission.  Returns 0 when at least one
 * subflow is scheduled, a negative error otherwise.
 */
int mptcp_sched_get_send(struct mptcp_sock *msk)
{
	struct mptcp_subflow_context *subflow;
	struct mptcp_sched_data data;

	msk_owned_by_me(msk);

	/* the following check is moved out of mptcp_subflow_get_send */
	if (__mptcp_check_fallback(msk)) {
		/* after fallback, only the first subflow can carry data */
		if (msk->first &&
		    __tcp_can_send(msk->first) &&
		    sk_stream_memory_free(msk->first)) {
			mptcp_subflow_set_scheduled(mptcp_subflow_ctx(msk->first), true);
			return 0;
		}
		return -EINVAL;
	}

	/* a subflow scheduled earlier is still pending: nothing to do */
	mptcp_for_each_subflow(msk, subflow) {
		if (READ_ONCE(subflow->scheduled))
			return 0;
	}

	data.reinject = false;
	if (msk->sched == &mptcp_sched_default || !msk->sched)
		return mptcp_sched_default_get_subflow(msk, &data);
	return msk->sched->get_subflow(msk, &data);
}
172
/* Ask the msk's scheduler (or the default one) to select and mark the
 * subflow(s) to be used for retransmissions.  Returns 0 when at least
 * one subflow is scheduled, a negative error otherwise.
 */
int mptcp_sched_get_retrans(struct mptcp_sock *msk)
{
	struct mptcp_subflow_context *subflow;
	struct mptcp_sched_data data;

	msk_owned_by_me(msk);

	/* the following check is moved out of mptcp_subflow_get_retrans */
	if (__mptcp_check_fallback(msk))
		return -EINVAL;

	/* a subflow scheduled earlier is still pending: nothing to do */
	mptcp_for_each_subflow(msk, subflow) {
		if (READ_ONCE(subflow->scheduled))
			return 0;
	}

	data.reinject = true;
	if (msk->sched == &mptcp_sched_default || !msk->sched)
		return mptcp_sched_default_get_subflow(msk, &data);
	return msk->sched->get_subflow(msk, &data);
}