v6.8
// SPDX-License-Identifier: GPL-2.0
/*
 * To speed up listener socket lookup, create an array to store all sockets
 * listening on the same port.  This allows a decision to be made after finding
 * the first socket.  An optional BPF program can also be configured for
 * selecting the socket index from the array of available sockets.
 */
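
/*
 * Editor's illustration (not part of this file): a minimal userspace
 * sketch of how a reuseport group comes into being. Every socket sets
 * SO_REUSEPORT before bind(); the second and later bind()/listen() calls
 * reach reuseport_add_sock() below. The port number and backlog are
 * assumptions for the example.
 */
#if 0	/* userspace example, compiled separately */
#include <netinet/in.h>
#include <string.h>
#include <sys/socket.h>

static int make_listener(void)
{
	struct sockaddr_in addr;
	int one = 1;
	int fd = socket(AF_INET, SOCK_STREAM, 0);

	if (fd < 0)
		return -1;
	/* must be set on every socket in the group before bind() */
	setsockopt(fd, SOL_SOCKET, SO_REUSEPORT, &one, sizeof(one));
	memset(&addr, 0, sizeof(addr));
	addr.sin_family = AF_INET;
	addr.sin_addr.s_addr = htonl(INADDR_ANY);	/* bind_inany == true */
	addr.sin_port = htons(8080);
	if (bind(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0 ||
	    listen(fd, 128) < 0)
		return -1;
	return fd;
}
#endif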

#include <net/ip.h>
#include <net/sock_reuseport.h>
#include <linux/bpf.h>
#include <linux/idr.h>
#include <linux/filter.h>
#include <linux/rcupdate.h>

#define INIT_SOCKS 128

DEFINE_SPINLOCK(reuseport_lock);

static DEFINE_IDA(reuseport_ida);
static int reuseport_resurrect(struct sock *sk, struct sock_reuseport *old_reuse,
			       struct sock_reuseport *reuse, bool bind_inany);

void reuseport_has_conns_set(struct sock *sk)
{
	struct sock_reuseport *reuse;

	if (!rcu_access_pointer(sk->sk_reuseport_cb))
		return;

	spin_lock_bh(&reuseport_lock);
	reuse = rcu_dereference_protected(sk->sk_reuseport_cb,
					  lockdep_is_held(&reuseport_lock));
	if (likely(reuse))
		reuse->has_conns = 1;
	spin_unlock_bh(&reuseport_lock);
}
EXPORT_SYMBOL(reuseport_has_conns_set);

static void __reuseport_get_incoming_cpu(struct sock_reuseport *reuse)
{
	/* Paired with READ_ONCE() in reuseport_select_sock_by_hash(). */
	WRITE_ONCE(reuse->incoming_cpu, reuse->incoming_cpu + 1);
}

static void __reuseport_put_incoming_cpu(struct sock_reuseport *reuse)
{
	/* Paired with READ_ONCE() in reuseport_select_sock_by_hash(). */
	WRITE_ONCE(reuse->incoming_cpu, reuse->incoming_cpu - 1);
}

static void reuseport_get_incoming_cpu(struct sock *sk, struct sock_reuseport *reuse)
{
	if (sk->sk_incoming_cpu >= 0)
		__reuseport_get_incoming_cpu(reuse);
}

static void reuseport_put_incoming_cpu(struct sock *sk, struct sock_reuseport *reuse)
{
	if (sk->sk_incoming_cpu >= 0)
		__reuseport_put_incoming_cpu(reuse);
}
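
/*
 * Editor's note: despite the name, reuse->incoming_cpu is not a CPU id
 * but a count of sockets in the group with sk_incoming_cpu set (>= 0).
 * reuseport_select_sock_by_hash() can then skip the per-socket CPU
 * comparison entirely while the count is zero.
 */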

void reuseport_update_incoming_cpu(struct sock *sk, int val)
{
	struct sock_reuseport *reuse;
	int old_sk_incoming_cpu;

	if (unlikely(!rcu_access_pointer(sk->sk_reuseport_cb))) {
		/* Paired with READ_ONCE() in sk_incoming_cpu_update()
		 * and compute_score().
		 */
		WRITE_ONCE(sk->sk_incoming_cpu, val);
		return;
	}

	spin_lock_bh(&reuseport_lock);

	/* This must be done under reuseport_lock to avoid a race with
	 * reuseport_grow(), which accesses sk->sk_incoming_cpu without
	 * lock_sock() when detaching a shutdown()ed sk.
	 *
	 * Paired with READ_ONCE() in reuseport_select_sock_by_hash().
	 */
	old_sk_incoming_cpu = sk->sk_incoming_cpu;
	WRITE_ONCE(sk->sk_incoming_cpu, val);

	reuse = rcu_dereference_protected(sk->sk_reuseport_cb,
					  lockdep_is_held(&reuseport_lock));

	/* reuseport_grow() has detached a closed sk. */
	if (!reuse)
		goto out;

	if (old_sk_incoming_cpu < 0 && val >= 0)
		__reuseport_get_incoming_cpu(reuse);
	else if (old_sk_incoming_cpu >= 0 && val < 0)
		__reuseport_put_incoming_cpu(reuse);

out:
	spin_unlock_bh(&reuseport_lock);
}
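
/*
 * Editor's illustration (not part of this file): the userspace knob that
 * lands in reuseport_update_incoming_cpu(). Pinning each listener to the
 * CPU of the thread that owns it makes the lookup below prefer the local
 * socket. The helper name is an assumption.
 */
#if 0	/* userspace example, compiled separately */
#include <sys/socket.h>

static int pin_listener_to_cpu(int fd, int cpu)
{
	/* cpu >= 0 enables the preference; -1 clears it again */
	return setsockopt(fd, SOL_SOCKET, SO_INCOMING_CPU, &cpu, sizeof(cpu));
}
#endif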

static int reuseport_sock_index(struct sock *sk,
				const struct sock_reuseport *reuse,
				bool closed)
{
	int left, right;

	if (!closed) {
		left = 0;
		right = reuse->num_socks;
	} else {
		left = reuse->max_socks - reuse->num_closed_socks;
		right = reuse->max_socks;
	}

	for (; left < right; left++)
		if (reuse->socks[left] == sk)
			return left;
	return -1;
}
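
/*
 * Editor's note: the socks[] array is split in two. Listening sockets
 * fill the front, shutdown()ed sockets awaiting request migration are
 * stacked at the back, and the slots in between are unused:
 *
 *   socks[0 .. num_socks - 1]                              listening
 *   socks[max_socks - num_closed_socks .. max_socks - 1]   closed
 *
 * reuseport_sock_index() searches one region or the other per @closed.
 */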

static void __reuseport_add_sock(struct sock *sk,
				 struct sock_reuseport *reuse)
{
	reuse->socks[reuse->num_socks] = sk;
	/* paired with smp_rmb() in reuseport_(select|migrate)_sock() */
	smp_wmb();
	reuse->num_socks++;
	reuseport_get_incoming_cpu(sk, reuse);
}

static bool __reuseport_detach_sock(struct sock *sk,
				    struct sock_reuseport *reuse)
{
	int i = reuseport_sock_index(sk, reuse, false);

	if (i == -1)
		return false;

	reuse->socks[i] = reuse->socks[reuse->num_socks - 1];
	reuse->num_socks--;
	reuseport_put_incoming_cpu(sk, reuse);

	return true;
}

static void __reuseport_add_closed_sock(struct sock *sk,
					struct sock_reuseport *reuse)
{
	reuse->socks[reuse->max_socks - reuse->num_closed_socks - 1] = sk;
	/* paired with READ_ONCE() in inet_csk_bind_conflict() */
	WRITE_ONCE(reuse->num_closed_socks, reuse->num_closed_socks + 1);
	reuseport_get_incoming_cpu(sk, reuse);
}

static bool __reuseport_detach_closed_sock(struct sock *sk,
					   struct sock_reuseport *reuse)
{
	int i = reuseport_sock_index(sk, reuse, true);

	if (i == -1)
		return false;

	reuse->socks[i] = reuse->socks[reuse->max_socks - reuse->num_closed_socks];
	/* paired with READ_ONCE() in inet_csk_bind_conflict() */
	WRITE_ONCE(reuse->num_closed_socks, reuse->num_closed_socks - 1);
	reuseport_put_incoming_cpu(sk, reuse);

	return true;
}

static struct sock_reuseport *__reuseport_alloc(unsigned int max_socks)
{
	unsigned int size = sizeof(struct sock_reuseport) +
		      sizeof(struct sock *) * max_socks;
	struct sock_reuseport *reuse = kzalloc(size, GFP_ATOMIC);

	if (!reuse)
		return NULL;

	reuse->max_socks = max_socks;

	RCU_INIT_POINTER(reuse->prog, NULL);
	return reuse;
}

int reuseport_alloc(struct sock *sk, bool bind_inany)
{
	struct sock_reuseport *reuse;
	int id, ret = 0;

	/* bh lock used since this function call may precede hlist lock in
	 * soft irq of receive path or setsockopt from process context
	 */
	spin_lock_bh(&reuseport_lock);

	/* Allocation attempts can occur concurrently via the setsockopt path
	 * and the bind/hash path.  Nothing to do when we lose the race.
	 */
	reuse = rcu_dereference_protected(sk->sk_reuseport_cb,
					  lockdep_is_held(&reuseport_lock));
	if (reuse) {
		if (reuse->num_closed_socks) {
			/* sk was shutdown()ed before */
			ret = reuseport_resurrect(sk, reuse, NULL, bind_inany);
			goto out;
		}

		/* Only set reuse->bind_inany if the bind_inany is true.
		 * Otherwise, it will overwrite the reuse->bind_inany
		 * which was set by the bind/hash path.
		 */
		if (bind_inany)
			reuse->bind_inany = bind_inany;
		goto out;
	}

	reuse = __reuseport_alloc(INIT_SOCKS);
	if (!reuse) {
		ret = -ENOMEM;
		goto out;
	}

	id = ida_alloc(&reuseport_ida, GFP_ATOMIC);
	if (id < 0) {
		kfree(reuse);
		ret = id;
		goto out;
	}

	reuse->reuseport_id = id;
	reuse->bind_inany = bind_inany;
	reuse->socks[0] = sk;
	reuse->num_socks = 1;
	reuseport_get_incoming_cpu(sk, reuse);
	rcu_assign_pointer(sk->sk_reuseport_cb, reuse);

out:
	spin_unlock_bh(&reuseport_lock);

	return ret;
}
EXPORT_SYMBOL(reuseport_alloc);

static struct sock_reuseport *reuseport_grow(struct sock_reuseport *reuse)
{
	struct sock_reuseport *more_reuse;
	u32 more_socks_size, i;

	more_socks_size = reuse->max_socks * 2U;
	if (more_socks_size > U16_MAX) {
		if (reuse->num_closed_socks) {
			/* Make room by removing a closed sk.
			 * The child has already been migrated.
			 * Only reqsk left at this point.
			 */
			struct sock *sk;

			sk = reuse->socks[reuse->max_socks - reuse->num_closed_socks];
			RCU_INIT_POINTER(sk->sk_reuseport_cb, NULL);
			__reuseport_detach_closed_sock(sk, reuse);

			return reuse;
		}

		return NULL;
	}

	more_reuse = __reuseport_alloc(more_socks_size);
	if (!more_reuse)
		return NULL;

	more_reuse->num_socks = reuse->num_socks;
	more_reuse->num_closed_socks = reuse->num_closed_socks;
	more_reuse->prog = reuse->prog;
	more_reuse->reuseport_id = reuse->reuseport_id;
	more_reuse->bind_inany = reuse->bind_inany;
	more_reuse->has_conns = reuse->has_conns;
	more_reuse->incoming_cpu = reuse->incoming_cpu;

	memcpy(more_reuse->socks, reuse->socks,
	       reuse->num_socks * sizeof(struct sock *));
	memcpy(more_reuse->socks +
	       (more_reuse->max_socks - more_reuse->num_closed_socks),
	       reuse->socks + (reuse->max_socks - reuse->num_closed_socks),
	       reuse->num_closed_socks * sizeof(struct sock *));
	more_reuse->synq_overflow_ts = READ_ONCE(reuse->synq_overflow_ts);

	for (i = 0; i < reuse->max_socks; ++i)
		rcu_assign_pointer(reuse->socks[i]->sk_reuseport_cb,
				   more_reuse);

	/* Note: we use kfree_rcu here instead of reuseport_free_rcu so
	 * that reuse and more_reuse can temporarily share a reference
	 * to prog.
	 */
	kfree_rcu(reuse, rcu);
	return more_reuse;
}
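
/*
 * Editor's note: a group starts with INIT_SOCKS (128) slots and doubles
 * on demand; once doubling would exceed U16_MAX, reuseport_grow() stops
 * reallocating and instead reclaims one slot by dropping a closed,
 * already-migrated socket, if any remain.
 */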

static void reuseport_free_rcu(struct rcu_head *head)
{
	struct sock_reuseport *reuse;

	reuse = container_of(head, struct sock_reuseport, rcu);
	sk_reuseport_prog_free(rcu_dereference_protected(reuse->prog, 1));
	ida_free(&reuseport_ida, reuse->reuseport_id);
	kfree(reuse);
}

/**
 *  reuseport_add_sock - Add a socket to the reuseport group of another.
 *  @sk:  New socket to add to the group.
 *  @sk2: Socket belonging to the existing reuseport group.
 *  @bind_inany: Whether or not the group is bound to a local INANY address.
 *
 *  May return ENOMEM and not add socket to group under memory pressure.
 */
int reuseport_add_sock(struct sock *sk, struct sock *sk2, bool bind_inany)
{
	struct sock_reuseport *old_reuse, *reuse;

	if (!rcu_access_pointer(sk2->sk_reuseport_cb)) {
		int err = reuseport_alloc(sk2, bind_inany);

		if (err)
			return err;
	}

	spin_lock_bh(&reuseport_lock);
	reuse = rcu_dereference_protected(sk2->sk_reuseport_cb,
					  lockdep_is_held(&reuseport_lock));
	old_reuse = rcu_dereference_protected(sk->sk_reuseport_cb,
					      lockdep_is_held(&reuseport_lock));
	if (old_reuse && old_reuse->num_closed_socks) {
		/* sk was shutdown()ed before */
		int err = reuseport_resurrect(sk, old_reuse, reuse, reuse->bind_inany);

		spin_unlock_bh(&reuseport_lock);
		return err;
	}

	if (old_reuse && old_reuse->num_socks != 1) {
		spin_unlock_bh(&reuseport_lock);
		return -EBUSY;
	}

	if (reuse->num_socks + reuse->num_closed_socks == reuse->max_socks) {
		reuse = reuseport_grow(reuse);
		if (!reuse) {
			spin_unlock_bh(&reuseport_lock);
			return -ENOMEM;
		}
	}

	__reuseport_add_sock(sk, reuse);
	rcu_assign_pointer(sk->sk_reuseport_cb, reuse);

	spin_unlock_bh(&reuseport_lock);

	if (old_reuse)
		call_rcu(&old_reuse->rcu, reuseport_free_rcu);
	return 0;
}
EXPORT_SYMBOL(reuseport_add_sock);

static int reuseport_resurrect(struct sock *sk, struct sock_reuseport *old_reuse,
			       struct sock_reuseport *reuse, bool bind_inany)
{
	if (old_reuse == reuse) {
		/* If sk was in the same reuseport group, just pop sk out of
		 * the closed section and push sk into the listening section.
		 */
		__reuseport_detach_closed_sock(sk, old_reuse);
		__reuseport_add_sock(sk, old_reuse);
		return 0;
	}

	if (!reuse) {
		/* In bind()/listen() path, we cannot carry over the eBPF prog
		 * for the shutdown()ed socket. In setsockopt() path, we should
		 * not change the eBPF prog of listening sockets by attaching a
		 * prog to the shutdown()ed socket. Thus, we will allocate a new
		 * reuseport group and detach sk from the old group.
		 */
		int id;

		reuse = __reuseport_alloc(INIT_SOCKS);
		if (!reuse)
			return -ENOMEM;

		id = ida_alloc(&reuseport_ida, GFP_ATOMIC);
		if (id < 0) {
			kfree(reuse);
			return id;
		}

		reuse->reuseport_id = id;
		reuse->bind_inany = bind_inany;
	} else {
		/* Move sk from the old group to the new one if
		 * - all the other listeners in the old group were close()d or
		 *   shutdown()ed, and then sk2 has listen()ed on the same port
		 * OR
		 * - sk listen()ed without bind() (or with autobind), was
		 *   shutdown()ed, and then listen()s on another port which
		 *   sk2 listen()s on.
		 */
		if (reuse->num_socks + reuse->num_closed_socks == reuse->max_socks) {
			reuse = reuseport_grow(reuse);
			if (!reuse)
				return -ENOMEM;
		}
	}

	__reuseport_detach_closed_sock(sk, old_reuse);
	__reuseport_add_sock(sk, reuse);
	rcu_assign_pointer(sk->sk_reuseport_cb, reuse);

	if (old_reuse->num_socks + old_reuse->num_closed_socks == 0)
		call_rcu(&old_reuse->rcu, reuseport_free_rcu);

	return 0;
}

void reuseport_detach_sock(struct sock *sk)
{
	struct sock_reuseport *reuse;

	spin_lock_bh(&reuseport_lock);
	reuse = rcu_dereference_protected(sk->sk_reuseport_cb,
					  lockdep_is_held(&reuseport_lock));

	/* reuseport_grow() has detached a closed sk */
	if (!reuse)
		goto out;

	/* Notify the bpf side. The sk may be added to a sockarray
	 * map. If so, sockarray logic will remove it from the map.
	 *
	 * Other bpf map types that work with reuseport, like sockmap,
	 * don't need an explicit callback from here. They override sk
	 * unhash/close ops to remove the sk from the map before we
	 * get to this point.
	 */
	bpf_sk_reuseport_detach(sk);

	rcu_assign_pointer(sk->sk_reuseport_cb, NULL);

	if (!__reuseport_detach_closed_sock(sk, reuse))
		__reuseport_detach_sock(sk, reuse);

	if (reuse->num_socks + reuse->num_closed_socks == 0)
		call_rcu(&reuse->rcu, reuseport_free_rcu);

out:
	spin_unlock_bh(&reuseport_lock);
}
EXPORT_SYMBOL(reuseport_detach_sock);

void reuseport_stop_listen_sock(struct sock *sk)
{
	if (sk->sk_protocol == IPPROTO_TCP) {
		struct sock_reuseport *reuse;
		struct bpf_prog *prog;

		spin_lock_bh(&reuseport_lock);

		reuse = rcu_dereference_protected(sk->sk_reuseport_cb,
						  lockdep_is_held(&reuseport_lock));
		prog = rcu_dereference_protected(reuse->prog,
						 lockdep_is_held(&reuseport_lock));

		if (READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_migrate_req) ||
		    (prog && prog->expected_attach_type == BPF_SK_REUSEPORT_SELECT_OR_MIGRATE)) {
			/* Migration capable, move sk from the listening section
			 * to the closed section.
			 */
			bpf_sk_reuseport_detach(sk);

			__reuseport_detach_sock(sk, reuse);
			__reuseport_add_closed_sock(sk, reuse);

			spin_unlock_bh(&reuseport_lock);
			return;
		}

		spin_unlock_bh(&reuseport_lock);
	}

	/* Not capable to do migration, detach immediately */
	reuseport_detach_sock(sk);
}
EXPORT_SYMBOL(reuseport_stop_listen_sock);
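
/*
 * Editor's note: migration is enabled either by the per-netns sysctl
 * net.ipv4.tcp_migrate_req (set to 1) or by attaching a
 * BPF_SK_REUSEPORT_SELECT_OR_MIGRATE program; without either, the
 * closing listener is detached right away and its pending requests
 * are not migrated.
 */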

static struct sock *run_bpf_filter(struct sock_reuseport *reuse, u16 socks,
				   struct bpf_prog *prog, struct sk_buff *skb,
				   int hdr_len)
{
	struct sk_buff *nskb = NULL;
	u32 index;

	if (skb_shared(skb)) {
		nskb = skb_clone(skb, GFP_ATOMIC);
		if (!nskb)
			return NULL;
		skb = nskb;
	}

	/* temporarily advance data past protocol header */
	if (!pskb_pull(skb, hdr_len)) {
		kfree_skb(nskb);
		return NULL;
	}
	index = bpf_prog_run_save_cb(prog, skb);
	__skb_push(skb, hdr_len);

	consume_skb(nskb);

	if (index >= socks)
		return NULL;

	return reuse->socks[index];
}
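
/*
 * Editor's illustration (not part of this file): a sketch of the kind of
 * filter run_bpf_filter() consumes (attached with SO_ATTACH_REUSEPORT_EBPF);
 * the program's return value is used as an index into the socket array.
 * The group size of 4 is an assumption.
 */
#if 0	/* BPF example, built separately with clang -target bpf */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

SEC("socket")
int select_by_cpu(struct __sk_buff *skb)
{
	/* out-of-range indexes make the kernel fall back to hashing,
	 * per the "index >= socks" check above
	 */
	return bpf_get_smp_processor_id() % 4;
}

char _license[] SEC("license") = "GPL";
#endif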

static struct sock *reuseport_select_sock_by_hash(struct sock_reuseport *reuse,
						  u32 hash, u16 num_socks)
{
	struct sock *first_valid_sk = NULL;
	int i, j;

	i = j = reciprocal_scale(hash, num_socks);
	do {
		struct sock *sk = reuse->socks[i];

		if (sk->sk_state != TCP_ESTABLISHED) {
			/* Paired with WRITE_ONCE() in __reuseport_(get|put)_incoming_cpu(). */
			if (!READ_ONCE(reuse->incoming_cpu))
				return sk;

			/* Paired with WRITE_ONCE() in reuseport_update_incoming_cpu(). */
			if (READ_ONCE(sk->sk_incoming_cpu) == raw_smp_processor_id())
				return sk;

			if (!first_valid_sk)
				first_valid_sk = sk;
		}

		i++;
		if (i >= num_socks)
			i = 0;
	} while (i != j);

	return first_valid_sk;
}
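
/*
 * Editor's note: selection starts at a slot derived from the flow hash
 * (reciprocal_scale) and walks the array circularly. When no socket in
 * the group has CPU affinity set (incoming_cpu == 0), the first usable
 * socket wins; otherwise a socket pinned to the current CPU is preferred,
 * with the first usable one kept as a fallback.
 */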

/**
 *  reuseport_select_sock - Select a socket from an SO_REUSEPORT group.
 *  @sk: First socket in the group.
 *  @hash: When no BPF filter is available, use this hash to select.
 *  @skb: skb to run through BPF filter.
 *  @hdr_len: BPF filter expects skb data pointer at payload data.  If
 *    the skb does not yet point at the payload, this parameter represents
 *    how far the pointer needs to advance to reach the payload.
 *  Returns a socket that should receive the packet (or NULL on error).
 */
struct sock *reuseport_select_sock(struct sock *sk,
				   u32 hash,
				   struct sk_buff *skb,
				   int hdr_len)
{
	struct sock_reuseport *reuse;
	struct bpf_prog *prog;
	struct sock *sk2 = NULL;
	u16 socks;

	rcu_read_lock();
	reuse = rcu_dereference(sk->sk_reuseport_cb);

	/* if memory allocation failed or add call is not yet complete */
	if (!reuse)
		goto out;

	prog = rcu_dereference(reuse->prog);
	socks = READ_ONCE(reuse->num_socks);
	if (likely(socks)) {
		/* paired with smp_wmb() in __reuseport_add_sock() */
		smp_rmb();

		if (!prog || !skb)
			goto select_by_hash;

		if (prog->type == BPF_PROG_TYPE_SK_REUSEPORT)
			sk2 = bpf_run_sk_reuseport(reuse, sk, prog, skb, NULL, hash);
		else
			sk2 = run_bpf_filter(reuse, socks, prog, skb, hdr_len);

select_by_hash:
		/* no bpf or invalid bpf result: fall back to hash usage */
		if (!sk2)
			sk2 = reuseport_select_sock_by_hash(reuse, hash, socks);
	}

out:
	rcu_read_unlock();
	return sk2;
}
EXPORT_SYMBOL(reuseport_select_sock);

/**
 *  reuseport_migrate_sock - Select a socket from an SO_REUSEPORT group.
 *  @sk: close()ed or shutdown()ed socket in the group.
 *  @migrating_sk: ESTABLISHED/SYN_RECV full socket in the accept queue or
 *    NEW_SYN_RECV request socket during 3WHS.
 *  @skb: skb to run through BPF filter.
 *  Returns a socket (with sk_refcnt +1) that should accept the child socket
 *  (or NULL on error).
 */
struct sock *reuseport_migrate_sock(struct sock *sk,
				    struct sock *migrating_sk,
				    struct sk_buff *skb)
{
	struct sock_reuseport *reuse;
	struct sock *nsk = NULL;
	bool allocated = false;
	struct bpf_prog *prog;
	u16 socks;
	u32 hash;

	rcu_read_lock();

	reuse = rcu_dereference(sk->sk_reuseport_cb);
	if (!reuse)
		goto out;

	socks = READ_ONCE(reuse->num_socks);
	if (unlikely(!socks))
		goto failure;

	/* paired with smp_wmb() in __reuseport_add_sock() */
	smp_rmb();

	hash = migrating_sk->sk_hash;
	prog = rcu_dereference(reuse->prog);
	if (!prog || prog->expected_attach_type != BPF_SK_REUSEPORT_SELECT_OR_MIGRATE) {
		if (READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_migrate_req))
			goto select_by_hash;
		goto failure;
	}

	if (!skb) {
		skb = alloc_skb(0, GFP_ATOMIC);
		if (!skb)
			goto failure;
		allocated = true;
	}

	nsk = bpf_run_sk_reuseport(reuse, sk, prog, skb, migrating_sk, hash);

	if (allocated)
		kfree_skb(skb);

select_by_hash:
	if (!nsk)
		nsk = reuseport_select_sock_by_hash(reuse, hash, socks);

	if (IS_ERR_OR_NULL(nsk) || unlikely(!refcount_inc_not_zero(&nsk->sk_refcnt))) {
		nsk = NULL;
		goto failure;
	}

out:
	rcu_read_unlock();
	return nsk;

failure:
	__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMIGRATEREQFAILURE);
	goto out;
}
EXPORT_SYMBOL(reuseport_migrate_sock);
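
/*
 * Editor's note: unlike reuseport_select_sock(), the socket returned here
 * carries an extra reference (refcount_inc_not_zero() above) that the
 * caller must drop. Failed migrations bump LINUX_MIB_TCPMIGRATEREQFAILURE,
 * which should surface as the TcpExt counter TCPMigrateReqFailure in
 * /proc/net/netstat.
 */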

int reuseport_attach_prog(struct sock *sk, struct bpf_prog *prog)
{
	struct sock_reuseport *reuse;
	struct bpf_prog *old_prog;

	if (sk_unhashed(sk)) {
		int err;

		if (!sk->sk_reuseport)
			return -EINVAL;

		err = reuseport_alloc(sk, false);
		if (err)
			return err;
	} else if (!rcu_access_pointer(sk->sk_reuseport_cb)) {
		/* The socket wasn't bound with SO_REUSEPORT */
		return -EINVAL;
	}

	spin_lock_bh(&reuseport_lock);
	reuse = rcu_dereference_protected(sk->sk_reuseport_cb,
					  lockdep_is_held(&reuseport_lock));
	old_prog = rcu_dereference_protected(reuse->prog,
					     lockdep_is_held(&reuseport_lock));
	rcu_assign_pointer(reuse->prog, prog);
	spin_unlock_bh(&reuseport_lock);

	sk_reuseport_prog_free(old_prog);
	return 0;
}
EXPORT_SYMBOL(reuseport_attach_prog);

int reuseport_detach_prog(struct sock *sk)
{
	struct sock_reuseport *reuse;
	struct bpf_prog *old_prog;

	old_prog = NULL;
	spin_lock_bh(&reuseport_lock);
	reuse = rcu_dereference_protected(sk->sk_reuseport_cb,
					  lockdep_is_held(&reuseport_lock));

	/* reuse must be checked after acquiring the reuseport_lock
	 * because reuseport_grow() can detach a closed sk.
	 */
	if (!reuse) {
		spin_unlock_bh(&reuseport_lock);
		return sk->sk_reuseport ? -ENOENT : -EINVAL;
	}

	if (sk_unhashed(sk) && reuse->num_closed_socks) {
		spin_unlock_bh(&reuseport_lock);
		return -ENOENT;
	}

	old_prog = rcu_replace_pointer(reuse->prog, old_prog,
				       lockdep_is_held(&reuseport_lock));
	spin_unlock_bh(&reuseport_lock);

	if (!old_prog)
		return -ENOENT;

	sk_reuseport_prog_free(old_prog);
	return 0;
}
EXPORT_SYMBOL(reuseport_detach_prog);
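
/*
 * Editor's illustration (not part of this file): the setsockopt() pair
 * that reaches reuseport_attach_prog() and reuseport_detach_prog().
 * prog_fd is assumed to be a loaded BPF_PROG_TYPE_SK_REUSEPORT program.
 */
#if 0	/* userspace example, compiled separately */
#include <sys/socket.h>

static int attach_reuseport_prog(int sock_fd, int prog_fd)
{
	return setsockopt(sock_fd, SOL_SOCKET, SO_ATTACH_REUSEPORT_EBPF,
			  &prog_fd, sizeof(prog_fd));
}

static int detach_reuseport_prog(int sock_fd)
{
	int dummy = 0;

	/* the value is ignored; only the option name matters */
	return setsockopt(sock_fd, SOL_SOCKET, SO_DETACH_REUSEPORT_BPF,
			  &dummy, sizeof(dummy));
}
#endif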
v5.4
// SPDX-License-Identifier: GPL-2.0
/*
 * To speed up listener socket lookup, create an array to store all sockets
 * listening on the same port.  This allows a decision to be made after finding
 * the first socket.  An optional BPF program can also be configured for
 * selecting the socket index from the array of available sockets.
 */

#include <net/sock_reuseport.h>
#include <linux/bpf.h>
#include <linux/idr.h>
#include <linux/filter.h>
#include <linux/rcupdate.h>

#define INIT_SOCKS 128

DEFINE_SPINLOCK(reuseport_lock);

#define REUSEPORT_MIN_ID 1
static DEFINE_IDA(reuseport_ida);

int reuseport_get_id(struct sock_reuseport *reuse)
{
	int id;

	if (reuse->reuseport_id)
		return reuse->reuseport_id;

	id = ida_simple_get(&reuseport_ida, REUSEPORT_MIN_ID, 0,
			    /* Called under reuseport_lock */
			    GFP_ATOMIC);
	if (id < 0)
		return id;

	reuse->reuseport_id = id;

	return reuse->reuseport_id;
}
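
/*
 * Editor's note: in v5.4 the reuseport ID is still allocated lazily, on
 * first use by the BPF map side, hence REUSEPORT_MIN_ID and the non-zero
 * check above; in v6.8 (see reuseport_alloc() earlier) every group gets
 * an ID from ida_alloc() at creation time.
 */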

static struct sock_reuseport *__reuseport_alloc(unsigned int max_socks)
{
	unsigned int size = sizeof(struct sock_reuseport) +
		      sizeof(struct sock *) * max_socks;
	struct sock_reuseport *reuse = kzalloc(size, GFP_ATOMIC);

	if (!reuse)
		return NULL;

	reuse->max_socks = max_socks;

	RCU_INIT_POINTER(reuse->prog, NULL);
	return reuse;
}

int reuseport_alloc(struct sock *sk, bool bind_inany)
{
	struct sock_reuseport *reuse;

	/* bh lock used since this function call may precede hlist lock in
	 * soft irq of receive path or setsockopt from process context
	 */
	spin_lock_bh(&reuseport_lock);

	/* Allocation attempts can occur concurrently via the setsockopt path
	 * and the bind/hash path.  Nothing to do when we lose the race.
	 */
	reuse = rcu_dereference_protected(sk->sk_reuseport_cb,
					  lockdep_is_held(&reuseport_lock));
	if (reuse) {
		/* Only set reuse->bind_inany if the bind_inany is true.
		 * Otherwise, it will overwrite the reuse->bind_inany
		 * which was set by the bind/hash path.
		 */
		if (bind_inany)
			reuse->bind_inany = bind_inany;
		goto out;
	}

	reuse = __reuseport_alloc(INIT_SOCKS);
	if (!reuse) {
		spin_unlock_bh(&reuseport_lock);
		return -ENOMEM;
	}

	reuse->socks[0] = sk;
	reuse->num_socks = 1;
	reuse->bind_inany = bind_inany;
	rcu_assign_pointer(sk->sk_reuseport_cb, reuse);

out:
	spin_unlock_bh(&reuseport_lock);

	return 0;
}
EXPORT_SYMBOL(reuseport_alloc);

static struct sock_reuseport *reuseport_grow(struct sock_reuseport *reuse)
{
	struct sock_reuseport *more_reuse;
	u32 more_socks_size, i;

	more_socks_size = reuse->max_socks * 2U;
	if (more_socks_size > U16_MAX)
		return NULL;

	more_reuse = __reuseport_alloc(more_socks_size);
	if (!more_reuse)
		return NULL;

	more_reuse->max_socks = more_socks_size;
	more_reuse->num_socks = reuse->num_socks;
	more_reuse->prog = reuse->prog;
	more_reuse->reuseport_id = reuse->reuseport_id;
	more_reuse->bind_inany = reuse->bind_inany;

	memcpy(more_reuse->socks, reuse->socks,
	       reuse->num_socks * sizeof(struct sock *));
	more_reuse->synq_overflow_ts = READ_ONCE(reuse->synq_overflow_ts);

	for (i = 0; i < reuse->num_socks; ++i)
		rcu_assign_pointer(reuse->socks[i]->sk_reuseport_cb,
				   more_reuse);

	/* Note: we use kfree_rcu here instead of reuseport_free_rcu so
	 * that reuse and more_reuse can temporarily share a reference
	 * to prog.
	 */
	kfree_rcu(reuse, rcu);
	return more_reuse;
}

static void reuseport_free_rcu(struct rcu_head *head)
{
	struct sock_reuseport *reuse;

	reuse = container_of(head, struct sock_reuseport, rcu);
	sk_reuseport_prog_free(rcu_dereference_protected(reuse->prog, 1));
	if (reuse->reuseport_id)
		ida_simple_remove(&reuseport_ida, reuse->reuseport_id);
	kfree(reuse);
}

/**
 *  reuseport_add_sock - Add a socket to the reuseport group of another.
 *  @sk:  New socket to add to the group.
 *  @sk2: Socket belonging to the existing reuseport group.
 *  @bind_inany: Whether or not the group is bound to a local INANY address.
 *
 *  May return ENOMEM and not add socket to group under memory pressure.
 */
int reuseport_add_sock(struct sock *sk, struct sock *sk2, bool bind_inany)
{
	struct sock_reuseport *old_reuse, *reuse;

	if (!rcu_access_pointer(sk2->sk_reuseport_cb)) {
		int err = reuseport_alloc(sk2, bind_inany);

		if (err)
			return err;
	}

	spin_lock_bh(&reuseport_lock);
	reuse = rcu_dereference_protected(sk2->sk_reuseport_cb,
					  lockdep_is_held(&reuseport_lock));
	old_reuse = rcu_dereference_protected(sk->sk_reuseport_cb,
					     lockdep_is_held(&reuseport_lock));
	if (old_reuse && old_reuse->num_socks != 1) {
		spin_unlock_bh(&reuseport_lock);
		return -EBUSY;
	}

	if (reuse->num_socks == reuse->max_socks) {
		reuse = reuseport_grow(reuse);
		if (!reuse) {
			spin_unlock_bh(&reuseport_lock);
			return -ENOMEM;
		}
	}

	reuse->socks[reuse->num_socks] = sk;
	/* paired with smp_rmb() in reuseport_select_sock() */
	smp_wmb();
	reuse->num_socks++;
	rcu_assign_pointer(sk->sk_reuseport_cb, reuse);

	spin_unlock_bh(&reuseport_lock);

	if (old_reuse)
		call_rcu(&old_reuse->rcu, reuseport_free_rcu);
	return 0;
}
EXPORT_SYMBOL(reuseport_add_sock);

void reuseport_detach_sock(struct sock *sk)
{
	struct sock_reuseport *reuse;
	int i;

	spin_lock_bh(&reuseport_lock);
	reuse = rcu_dereference_protected(sk->sk_reuseport_cb,
					  lockdep_is_held(&reuseport_lock));

	/* At least one of the sk in this reuseport group is added to
	 * a bpf map.  Notify the bpf side.  The bpf map logic will
	 * remove the sk if it is indeed added to a bpf map.
	 */
	if (reuse->reuseport_id)
		bpf_sk_reuseport_detach(sk);

	rcu_assign_pointer(sk->sk_reuseport_cb, NULL);

	for (i = 0; i < reuse->num_socks; i++) {
		if (reuse->socks[i] == sk) {
			reuse->socks[i] = reuse->socks[reuse->num_socks - 1];
			reuse->num_socks--;
			if (reuse->num_socks == 0)
				call_rcu(&reuse->rcu, reuseport_free_rcu);
			break;
		}
	}
	spin_unlock_bh(&reuseport_lock);
}
EXPORT_SYMBOL(reuseport_detach_sock);

static struct sock *run_bpf_filter(struct sock_reuseport *reuse, u16 socks,
				   struct bpf_prog *prog, struct sk_buff *skb,
				   int hdr_len)
{
	struct sk_buff *nskb = NULL;
	u32 index;

	if (skb_shared(skb)) {
		nskb = skb_clone(skb, GFP_ATOMIC);
		if (!nskb)
			return NULL;
		skb = nskb;
	}

	/* temporarily advance data past protocol header */
	if (!pskb_pull(skb, hdr_len)) {
		kfree_skb(nskb);
		return NULL;
	}
	index = bpf_prog_run_save_cb(prog, skb);
	__skb_push(skb, hdr_len);

	consume_skb(nskb);

	if (index >= socks)
		return NULL;

	return reuse->socks[index];
}

/**
 *  reuseport_select_sock - Select a socket from an SO_REUSEPORT group.
 *  @sk: First socket in the group.
 *  @hash: When no BPF filter is available, use this hash to select.
 *  @skb: skb to run through BPF filter.
 *  @hdr_len: BPF filter expects skb data pointer at payload data.  If
 *    the skb does not yet point at the payload, this parameter represents
 *    how far the pointer needs to advance to reach the payload.
 *  Returns a socket that should receive the packet (or NULL on error).
 */
struct sock *reuseport_select_sock(struct sock *sk,
				   u32 hash,
				   struct sk_buff *skb,
				   int hdr_len)
{
	struct sock_reuseport *reuse;
	struct bpf_prog *prog;
	struct sock *sk2 = NULL;
	u16 socks;

	rcu_read_lock();
	reuse = rcu_dereference(sk->sk_reuseport_cb);

	/* if memory allocation failed or add call is not yet complete */
	if (!reuse)
		goto out;

	prog = rcu_dereference(reuse->prog);
	socks = READ_ONCE(reuse->num_socks);
	if (likely(socks)) {
		/* paired with smp_wmb() in reuseport_add_sock() */
		smp_rmb();

		if (!prog || !skb)
			goto select_by_hash;

		if (prog->type == BPF_PROG_TYPE_SK_REUSEPORT)
			sk2 = bpf_run_sk_reuseport(reuse, sk, prog, skb, hash);
		else
			sk2 = run_bpf_filter(reuse, socks, prog, skb, hdr_len);

select_by_hash:
		/* no bpf or invalid bpf result: fall back to hash usage */
		if (!sk2) {
			int i, j;

			i = j = reciprocal_scale(hash, socks);
			while (reuse->socks[i]->sk_state == TCP_ESTABLISHED) {
				i++;
				if (i >= reuse->num_socks)
					i = 0;
				if (i == j)
					goto out;
			}
			sk2 = reuse->socks[i];
		}
	}

out:
	rcu_read_unlock();
	return sk2;
}
EXPORT_SYMBOL(reuseport_select_sock);
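
/*
 * Editor's note: in v5.4 the hash fallback is inlined here and only
 * skips sockets in TCP_ESTABLISHED state (e.g. connected UDP sockets);
 * v6.8 factors this loop out into reuseport_select_sock_by_hash() and
 * layers the SO_INCOMING_CPU affinity preference on top.
 */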
int reuseport_attach_prog(struct sock *sk, struct bpf_prog *prog)
{
	struct sock_reuseport *reuse;
	struct bpf_prog *old_prog;

	if (sk_unhashed(sk) && sk->sk_reuseport) {
		int err = reuseport_alloc(sk, false);

		if (err)
			return err;
	} else if (!rcu_access_pointer(sk->sk_reuseport_cb)) {
		/* The socket wasn't bound with SO_REUSEPORT */
		return -EINVAL;
	}

	spin_lock_bh(&reuseport_lock);
	reuse = rcu_dereference_protected(sk->sk_reuseport_cb,
					  lockdep_is_held(&reuseport_lock));
	old_prog = rcu_dereference_protected(reuse->prog,
					     lockdep_is_held(&reuseport_lock));
	rcu_assign_pointer(reuse->prog, prog);
	spin_unlock_bh(&reuseport_lock);

	sk_reuseport_prog_free(old_prog);
	return 0;
}
EXPORT_SYMBOL(reuseport_attach_prog);

int reuseport_detach_prog(struct sock *sk)
{
	struct sock_reuseport *reuse;
	struct bpf_prog *old_prog;

	if (!rcu_access_pointer(sk->sk_reuseport_cb))
		return sk->sk_reuseport ? -ENOENT : -EINVAL;

	old_prog = NULL;
	spin_lock_bh(&reuseport_lock);
	reuse = rcu_dereference_protected(sk->sk_reuseport_cb,
					  lockdep_is_held(&reuseport_lock));
	rcu_swap_protected(reuse->prog, old_prog,
			   lockdep_is_held(&reuseport_lock));
	spin_unlock_bh(&reuseport_lock);

	if (!old_prog)
		return -ENOENT;

	sk_reuseport_prog_free(old_prog);
	return 0;
}
EXPORT_SYMBOL(reuseport_detach_prog);