/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _SOCK_REUSEPORT_H
#define _SOCK_REUSEPORT_H

#include <linux/filter.h>
#include <linux/skbuff.h>
#include <linux/types.h>
#include <linux/spinlock.h>
#include <net/sock.h>

extern spinlock_t reuseport_lock;
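/* Locking note: reuseport_lock serializes writers that grow or mutate a
 * group (adding, detaching, or migrating sockets), while the fast-path
 * lookup runs under rcu_read_lock() only. That split is why the struct
 * below embeds an rcu_head and marks its BPF selector pointer __rcu.
 */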

struct sock_reuseport {
	struct rcu_head		rcu;

	u16			max_socks;		/* length of socks */
	u16			num_socks;		/* elements in socks */
	u16			num_closed_socks;	/* closed elements in socks */
	u16			incoming_cpu;
	/* The last synq overflow event timestamp of this
	 * reuse->socks[] group.
	 */
	unsigned int		synq_overflow_ts;
	/* ID stays the same even after the size of socks[] grows. */
	unsigned int		reuseport_id;
	unsigned int		bind_inany:1;
	unsigned int		has_conns:1;
	struct bpf_prog __rcu	*prog;		/* optional BPF sock selector */
	struct sock		*socks[];	/* array of sock pointers */
};
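
/* A reuseport group is normally formed from userspace: each socket sets
 * SO_REUSEPORT before bind(), and binding to the same address:port links
 * it into the group via sk->sk_reuseport_cb. A minimal sketch of that
 * side (TCP over IPv4; error handling and addr setup elided, variable
 * names illustrative only):
 *
 *	int fd = socket(AF_INET, SOCK_STREAM, 0);
 *	int one = 1;
 *
 *	setsockopt(fd, SOL_SOCKET, SO_REUSEPORT, &one, sizeof(one));
 *	bind(fd, (struct sockaddr *)&addr, sizeof(addr));
 *	listen(fd, 128);
 */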

extern int reuseport_alloc(struct sock *sk, bool bind_inany);
extern int reuseport_add_sock(struct sock *sk, struct sock *sk2,
			      bool bind_inany);
extern void reuseport_detach_sock(struct sock *sk);
void reuseport_stop_listen_sock(struct sock *sk);
extern struct sock *reuseport_select_sock(struct sock *sk,
					  u32 hash,
					  struct sk_buff *skb,
					  int hdr_len);
struct sock *reuseport_migrate_sock(struct sock *sk,
				    struct sock *migrating_sk,
				    struct sk_buff *skb);
extern int reuseport_attach_prog(struct sock *sk, struct bpf_prog *prog);
extern int reuseport_detach_prog(struct sock *sk);
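
/* reuseport_attach_prog()/reuseport_detach_prog() back the
 * SO_ATTACH_REUSEPORT_CBPF and SO_ATTACH_REUSEPORT_EBPF socket options.
 * A sketch of the eBPF path, assuming prog_fd holds an already-loaded
 * BPF_PROG_TYPE_SK_REUSEPORT program (illustrative only):
 *
 *	setsockopt(fd, SOL_SOCKET, SO_ATTACH_REUSEPORT_EBPF,
 *		   &prog_fd, sizeof(prog_fd));
 *
 * Once attached, reuseport_select_sock() consults reuse->prog before
 * falling back to hash-based selection over socks[].
 */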

static inline bool reuseport_has_conns(struct sock *sk)
{
	struct sock_reuseport *reuse;
	bool ret = false;

	rcu_read_lock();
	reuse = rcu_dereference(sk->sk_reuseport_cb);
	if (reuse && reuse->has_conns)
		ret = true;
	rcu_read_unlock();

	return ret;
}
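
/* Pairing note: reuseport_has_conns() above is the lock-free read side of
 * the has_conns flag; reuseport_has_conns_set() below is the write side,
 * taken under reuseport_lock. The flag records that at least one socket
 * in the group has been connected, in which case lookup must not shortcut
 * to a simple hash pick (a connected UDP socket, for example, must keep
 * receiving its 4-tuple traffic).
 */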

void reuseport_has_conns_set(struct sock *sk);
void reuseport_update_incoming_cpu(struct sock *sk, int val);
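
/* incoming_cpu bookkeeping: reuseport_update_incoming_cpu() is driven by
 * setsockopt(SO_INCOMING_CPU); reuse->incoming_cpu counts how many group
 * members pinned a CPU, so selection only pays for a CPU-affinity scan
 * when at least one socket asked for it. Illustrative userspace side
 * (cpu value assumed valid for the machine):
 *
 *	int cpu = 3;
 *
 *	setsockopt(fd, SOL_SOCKET, SO_INCOMING_CPU, &cpu, sizeof(cpu));
 */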

#endif /* _SOCK_REUSEPORT_H */