/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _SOCK_REUSEPORT_H
#define _SOCK_REUSEPORT_H

#include <linux/filter.h>
#include <linux/skbuff.h>
#include <linux/types.h>
#include <linux/spinlock.h>
#include <net/sock.h>

extern spinlock_t reuseport_lock;

struct sock_reuseport {
	struct rcu_head		rcu;

	u16			max_socks;		/* length of socks */
	u16			num_socks;		/* elements in socks */
	u16			num_closed_socks;	/* closed elements in socks */
	u16			incoming_cpu;		/* members with sk_incoming_cpu set */
	/* The last synq overflow event timestamp of this
	 * reuse->socks[] group.
	 */
	unsigned int		synq_overflow_ts;
	/* ID stays the same even after the size of socks[] grows. */
	unsigned int		reuseport_id;
	unsigned int		bind_inany:1;
	unsigned int		has_conns:1;
	struct bpf_prog __rcu	*prog;		/* optional BPF sock selector */
	struct sock		*socks[];	/* array of sock pointers */
};
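
/* Illustrative sketch, not part of this header: the group is allocated
 * with the flexible socks[] array sized up front, so max_socks bounds
 * the array while num_socks and num_closed_socks partition the slots in
 * use (listening sockets at the front, closed ones packed at the end):
 *
 *	reuse = kzalloc(struct_size(reuse, socks, max_socks), GFP_ATOMIC);
 */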

int reuseport_alloc(struct sock *sk, bool bind_inany);
int reuseport_add_sock(struct sock *sk, struct sock *sk2,
		       bool bind_inany);
void reuseport_detach_sock(struct sock *sk);
void reuseport_stop_listen_sock(struct sock *sk);
struct sock *reuseport_select_sock(struct sock *sk,
				   u32 hash,
				   struct sk_buff *skb,
				   int hdr_len);
struct sock *reuseport_migrate_sock(struct sock *sk,
				    struct sock *migrating_sk,
				    struct sk_buff *skb);
int reuseport_attach_prog(struct sock *sk, struct bpf_prog *prog);
int reuseport_detach_prog(struct sock *sk);

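/* Typical call flow (illustrative): the first socket to bind with
 * SO_REUSEPORT creates the group, later sockets join it, and the
 * receive path asks the group to pick a target:
 *
 *	reuseport_alloc(sk, bind_inany);         // first socket in group
 *	reuseport_add_sock(sk2, sk, bind_inany); // sk2 joins sk's group
 *	target = reuseport_select_sock(sk, hash, skb, hdr_len);
 *
 * reuseport_migrate_sock() supports TCP request migration: when a
 * listener leaves the group (see reuseport_stop_listen_sock()), its
 * unaccepted children can be handed to another member instead of
 * being dropped.
 */
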
static inline bool reuseport_has_conns(struct sock *sk)
{
	struct sock_reuseport *reuse;
	bool ret = false;

	rcu_read_lock();
	reuse = rcu_dereference(sk->sk_reuseport_cb);
	if (reuse && reuse->has_conns)
		ret = true;
	rcu_read_unlock();

	return ret;
}
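
/* A racy, lockless read is fine here: has_conns is set once for the
 * lifetime of the group and never cleared, so a stale answer only
 * costs a fallback lookup. A caller sketch (illustrative, modeled on
 * the UDP demux pattern):
 *
 *	result = reuseport_select_sock(sk, hash, skb, sizeof(struct udphdr));
 *	if (result && !reuseport_has_conns(sk))
 *		return result;
 *	... otherwise fall back to full, connection-aware scoring ...
 */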

void reuseport_has_conns_set(struct sock *sk);
void reuseport_update_incoming_cpu(struct sock *sk, int val);
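
/* Illustrative: incoming_cpu counts members that set SO_INCOMING_CPU.
 * The setsockopt() path reports changes via
 * reuseport_update_incoming_cpu(sk, val), which lets socket selection
 * prefer a member pinned to the receiving CPU and skip that scan
 * entirely while the count is zero.
 */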

#endif /* _SOCK_REUSEPORT_H */