// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2018 Facebook
 */
#include <linux/bpf.h>
#include <linux/err.h>
#include <linux/sock_diag.h>
#include <net/sock_reuseport.h>

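/*
 * A reuseport array is a bpf_map header followed by a flexible array
 * of RCU-protected slots; map.max_entries fixes the number of slots
 * at allocation time.
 */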
struct reuseport_array {
	struct bpf_map map;
	struct sock __rcu *ptrs[];
};

static struct reuseport_array *reuseport_array(struct bpf_map *map)
{
	return (struct reuseport_array *)map;
}

/* The caller must hold the reuseport_lock */
void bpf_sk_reuseport_detach(struct sock *sk)
{
	uintptr_t sk_user_data;

	write_lock_bh(&sk->sk_callback_lock);
	sk_user_data = (uintptr_t)sk->sk_user_data;
	if (sk_user_data & SK_USER_DATA_BPF) {
		struct sock __rcu **socks;

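		/*
		 * sk_user_data was set by bpf_fd_reuseport_array_update_elem()
		 * to &array->ptrs[index] tagged with SK_USER_DATA_BPF;
		 * masking with SK_USER_DATA_PTRMASK recovers the slot
		 * that must be cleared.
		 */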
		socks = (void *)(sk_user_data & SK_USER_DATA_PTRMASK);
		WRITE_ONCE(sk->sk_user_data, NULL);
		/*
		 * Do not move this NULL assignment outside of
		 * sk->sk_callback_lock because there is
		 * a race with reuseport_array_free()
		 * which does not hold the reuseport_lock.
		 */
		RCU_INIT_POINTER(*socks, NULL);
	}
	write_unlock_bh(&sk->sk_callback_lock);
}

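/*
 * A map value carries a socket fd on update and a socket cookie on
 * lookup.  The fd fits in 4 bytes while the cookie needs 8, so only
 * these two value sizes are accepted.
 */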
static int reuseport_array_alloc_check(union bpf_attr *attr)
{
	if (attr->value_size != sizeof(u32) &&
	    attr->value_size != sizeof(u64))
		return -EINVAL;

	return array_map_alloc_check(attr);
}

static void *reuseport_array_lookup_elem(struct bpf_map *map, void *key)
{
	struct reuseport_array *array = reuseport_array(map);
	u32 index = *(u32 *)key;

	if (unlikely(index >= array->map.max_entries))
		return NULL;

	return rcu_dereference(array->ptrs[index]);
}

/* Called from syscall only */
static int reuseport_array_delete_elem(struct bpf_map *map, void *key)
{
	struct reuseport_array *array = reuseport_array(map);
	u32 index = *(u32 *)key;
	struct sock *sk;
	int err;

	if (index >= map->max_entries)
		return -E2BIG;

	if (!rcu_access_pointer(array->ptrs[index]))
		return -ENOENT;

	spin_lock_bh(&reuseport_lock);

	sk = rcu_dereference_protected(array->ptrs[index],
				       lockdep_is_held(&reuseport_lock));
	if (sk) {
		write_lock_bh(&sk->sk_callback_lock);
		WRITE_ONCE(sk->sk_user_data, NULL);
		RCU_INIT_POINTER(array->ptrs[index], NULL);
		write_unlock_bh(&sk->sk_callback_lock);
		err = 0;
	} else {
		err = -ENOENT;
	}

	spin_unlock_bh(&reuseport_lock);

	return err;
}

static void reuseport_array_free(struct bpf_map *map)
{
	struct reuseport_array *array = reuseport_array(map);
	struct sock *sk;
	u32 i;

	/*
	 * ops->map_*_elem() will not be able to access this
	 * array now. Hence, this function only races with
	 * bpf_sk_reuseport_detach() which was triggered by
	 * close() or disconnect().
	 *
	 * This function and bpf_sk_reuseport_detach() are
	 * both removing sk from "array".  Who removes it
	 * first does not matter.
	 *
	 * The only concern here is that bpf_sk_reuseport_detach()
	 * may access "array" which is being freed here.
	 * bpf_sk_reuseport_detach() accesses this "array"
	 * through sk->sk_user_data _and_ with sk->sk_callback_lock
	 * held, which is enough because this "array" is not freed
	 * until every sk->sk_user_data has stopped referencing it.
	 *
	 * Hence, due to the above, taking "reuseport_lock" is not
	 * needed here.
	 */

	/*
	 * Since reuseport_lock is not taken, sk is accessed under
	 * rcu_read_lock()
	 */
	rcu_read_lock();
	for (i = 0; i < map->max_entries; i++) {
		sk = rcu_dereference(array->ptrs[i]);
		if (sk) {
			write_lock_bh(&sk->sk_callback_lock);
			/*
			 * No need for WRITE_ONCE(). At this point,
			 * no one is reading it without taking the
			 * sk->sk_callback_lock.
			 */
			sk->sk_user_data = NULL;
			write_unlock_bh(&sk->sk_callback_lock);
			RCU_INIT_POINTER(array->ptrs[i], NULL);
		}
	}
	rcu_read_unlock();

	/*
	 * Once we reach here, no sk->sk_user_data is referencing this
	 * "array" any more.  "array" can be freed now.
	 */
	bpf_map_area_free(array);
}

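/*
 * The map header and all slots are allocated as one area.  The memory
 * is charged against the memlock budget up front so a failed
 * allocation only has to drop the charge again.
 */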
static struct bpf_map *reuseport_array_alloc(union bpf_attr *attr)
{
	int err, numa_node = bpf_map_attr_numa_node(attr);
	struct reuseport_array *array;
	struct bpf_map_memory mem;
	u64 array_size;

	if (!bpf_capable())
		return ERR_PTR(-EPERM);

	array_size = sizeof(*array);
	array_size += (u64)attr->max_entries * sizeof(struct sock *);

	err = bpf_map_charge_init(&mem, array_size);
	if (err)
		return ERR_PTR(err);

	/* allocate all map elements and zero-initialize them */
	array = bpf_map_area_alloc(array_size, numa_node);
	if (!array) {
		bpf_map_charge_finish(&mem);
		return ERR_PTR(-ENOMEM);
	}

	/* copy mandatory map attributes */
	bpf_map_init_from_attr(&array->map, attr);
	bpf_map_charge_move(&array->map.memory, &mem);

	return &array->map;
}

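/*
 * A userspace lookup cannot return a kernel sock pointer, so it
 * returns the socket's cookie instead; this is why an 8-byte value
 * is required here.
 */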
int bpf_fd_reuseport_array_lookup_elem(struct bpf_map *map, void *key,
				       void *value)
{
	struct sock *sk;
	int err;

	if (map->value_size != sizeof(u64))
		return -ENOSPC;

	rcu_read_lock();
	sk = reuseport_array_lookup_elem(map, key);
	if (sk) {
		*(u64 *)value = sock_gen_cookie(sk);
		err = 0;
	} else {
		err = -ENOENT;
	}
	rcu_read_unlock();

	return err;
}

static int
reuseport_array_update_check(const struct reuseport_array *array,
			     const struct sock *nsk,
			     const struct sock *osk,
			     const struct sock_reuseport *nsk_reuse,
			     u32 map_flags)
{
	if (osk && map_flags == BPF_NOEXIST)
		return -EEXIST;

	if (!osk && map_flags == BPF_EXIST)
		return -ENOENT;

	if (nsk->sk_protocol != IPPROTO_UDP && nsk->sk_protocol != IPPROTO_TCP)
		return -ENOTSUPP;

	if (nsk->sk_family != AF_INET && nsk->sk_family != AF_INET6)
		return -ENOTSUPP;

	if (nsk->sk_type != SOCK_STREAM && nsk->sk_type != SOCK_DGRAM)
		return -ENOTSUPP;

	/*
	 * sk must be hashed (i.e. listening in the TCP case or bound
	 * in the UDP case) and it must also be a SO_REUSEPORT sk
	 * (i.e. reuse cannot be NULL).
	 *
	 * Also, sk will be used in a bpf helper that is protected by
	 * rcu_read_lock().
	 */
	if (!sock_flag(nsk, SOCK_RCU_FREE) || !sk_hashed(nsk) || !nsk_reuse)
		return -EINVAL;

	/* READ_ONCE because the sk->sk_callback_lock may not be held here */
	if (READ_ONCE(nsk->sk_user_data))
		return -EBUSY;

	return 0;
}

/*
 * Called from syscall only.
 * The "nsk" is kept alive by the fd's refcnt.
 * The "osk" and "reuse" are protected by reuseport_lock.
 */
int bpf_fd_reuseport_array_update_elem(struct bpf_map *map, void *key,
				       void *value, u64 map_flags)
{
	struct reuseport_array *array = reuseport_array(map);
	struct sock *free_osk = NULL, *osk, *nsk;
	struct sock_reuseport *reuse;
	u32 index = *(u32 *)key;
	uintptr_t sk_user_data;
	struct socket *socket;
	int err, fd;

	if (map_flags > BPF_EXIST)
		return -EINVAL;

	if (index >= map->max_entries)
		return -E2BIG;

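	/*
	 * The value carries the socket fd: as a u64 (range-checked
	 * against S32_MAX) for 8-byte maps, or as a plain int for
	 * 4-byte maps.
	 */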
	if (map->value_size == sizeof(u64)) {
		u64 fd64 = *(u64 *)value;

		if (fd64 > S32_MAX)
			return -EINVAL;
		fd = fd64;
	} else {
		fd = *(int *)value;
	}

	socket = sockfd_lookup(fd, &err);
	if (!socket)
		return err;

	nsk = socket->sk;
	if (!nsk) {
		err = -EINVAL;
		goto put_file;
	}

	/* Quick checks before taking reuseport_lock */
	err = reuseport_array_update_check(array, nsk,
					   rcu_access_pointer(array->ptrs[index]),
					   rcu_access_pointer(nsk->sk_reuseport_cb),
					   map_flags);
	if (err)
		goto put_file;

	spin_lock_bh(&reuseport_lock);
	/*
	 * Some of the checks only need the reuseport_lock,
	 * but they are also done under sk_callback_lock for
	 * simplicity.
	 */
	write_lock_bh(&nsk->sk_callback_lock);

	osk = rcu_dereference_protected(array->ptrs[index],
					lockdep_is_held(&reuseport_lock));
	reuse = rcu_dereference_protected(nsk->sk_reuseport_cb,
					  lockdep_is_held(&reuseport_lock));
	err = reuseport_array_update_check(array, nsk, osk, reuse, map_flags);
	if (err)
		goto put_file_unlock;

	sk_user_data = (uintptr_t)&array->ptrs[index] | SK_USER_DATA_NOCOPY |
		SK_USER_DATA_BPF;
	WRITE_ONCE(nsk->sk_user_data, (void *)sk_user_data);
	rcu_assign_pointer(array->ptrs[index], nsk);
	free_osk = osk;
	err = 0;

put_file_unlock:
	write_unlock_bh(&nsk->sk_callback_lock);

	if (free_osk) {
		write_lock_bh(&free_osk->sk_callback_lock);
		WRITE_ONCE(free_osk->sk_user_data, NULL);
		write_unlock_bh(&free_osk->sk_callback_lock);
	}

	spin_unlock_bh(&reuseport_lock);
put_file:
	fput(socket->file);
	return err;
}

/* Called from syscall */
static int reuseport_array_get_next_key(struct bpf_map *map, void *key,
					void *next_key)
{
	struct reuseport_array *array = reuseport_array(map);
	u32 index = key ? *(u32 *)key : U32_MAX;
	u32 *next = (u32 *)next_key;

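	/*
	 * An out-of-range (or missing) key restarts the iteration at
	 * slot 0; the last slot has no successor.
	 */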
	if (index >= array->map.max_entries) {
		*next = 0;
		return 0;
	}

	if (index == array->map.max_entries - 1)
		return -ENOENT;

	*next = index + 1;
	return 0;
}

static int reuseport_array_map_btf_id;
const struct bpf_map_ops reuseport_array_ops = {
	.map_alloc_check = reuseport_array_alloc_check,
	.map_alloc = reuseport_array_alloc,
	.map_free = reuseport_array_free,
	.map_lookup_elem = reuseport_array_lookup_elem,
	.map_get_next_key = reuseport_array_get_next_key,
	.map_delete_elem = reuseport_array_delete_elem,
	.map_btf_name = "reuseport_array",
	.map_btf_id = &reuseport_array_map_btf_id,
};
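
/*
 * Minimal userspace sketch (not part of this file; assumes a libbpf
 * that provides bpf_map_create()): create a REUSEPORT_SOCKARRAY with
 * 8-byte values and install a listening SO_REUSEPORT socket fd into
 * slot 0.  A BPF_PROG_TYPE_SK_REUSEPORT program can then pick the
 * socket with bpf_sk_select_reuseport().
 *
 *	int map_fd = bpf_map_create(BPF_MAP_TYPE_REUSEPORT_SOCKARRAY,
 *				    NULL, sizeof(__u32), sizeof(__u64),
 *				    1, NULL);
 *	__u32 key = 0;
 *	__u64 val = sock_fd;	// hypothetical listening SO_REUSEPORT fd
 *	int err = bpf_map_update_elem(map_fd, &key, &val, BPF_NOEXIST);
 */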