/* SPDX-License-Identifier: GPL-2.0 */
/* AF_XDP internal functions
 * Copyright(c) 2018 Intel Corporation.
 */

#ifndef _LINUX_XDP_SOCK_H
#define _LINUX_XDP_SOCK_H

#include <linux/bpf.h>
#include <linux/workqueue.h>
#include <linux/if_xdp.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <net/sock.h>

struct net_device;
struct xsk_queue;
struct xdp_buff;

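/**
 * struct xdp_umem - region of user memory registered for AF_XDP
 *
 * Summary comment added for readability; net/xdp/xdp_umem.c holds the
 * authoritative behaviour.
 *
 * @addrs: kernel virtual address of the mapped umem pages
 * @size: total size of the region in bytes
 * @headroom: headroom reserved at the start of each chunk
 * @chunk_size: size of each buffer (frame) the region is divided into
 * @chunks: number of chunks in the region
 * @npgs: number of pinned pages backing the region
 * @user: user that the pinned pages are accounted against
 * @users: reference count; a umem may be shared by multiple sockets
 * @flags: XDP_UMEM_* flags from the XDP_UMEM_REG setsockopt
 * @zc: true once the umem has been used in zero-copy mode
 * @pgs: the pinned pages themselves
 * @id: umem identifier
 * @xsk_dma_list: DMA mappings created for devices using this umem
 * @work: deferred teardown work
 */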
struct xdp_umem {
	void *addrs;
	u64 size;
	u32 headroom;
	u32 chunk_size;
	u32 chunks;
	u32 npgs;
	struct user_struct *user;
	refcount_t users;
	u8 flags;
	bool zc;
	struct page **pgs;
	int id;
	struct list_head xsk_dma_list;
	struct work_struct work;
};

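/**
 * struct xsk_map - BPF map of type BPF_MAP_TYPE_XSKMAP
 * @map: common BPF map header
 * @lock: synchronizes map updates from the syscall path
 * @xsk_map: array of socket pointers, indexed by the map key; read
 *	     under RCU on the XDP fast path, hence the __rcu annotation
 *
 * XDP programs redirect frames into an AF_XDP socket by calling
 * bpf_redirect_map() on a map of this type.
 */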
struct xsk_map {
	struct bpf_map map;
	spinlock_t lock; /* Synchronize map updates */
	struct xdp_sock __rcu *xsk_map[];
};

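/* State for one AF_XDP (PF_XDP) socket.  @rx and @tx point to the
 * descriptor rings shared with user space and are cacheline-aligned
 * so that the RX and TX hot paths do not bounce the same cacheline.
 * @state tracks the bind lifecycle, and @mutex serializes control-path
 * operations against each other.
 */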
struct xdp_sock {
	/* struct sock must be the first member of struct xdp_sock */
	struct sock sk;
	struct xsk_queue *rx ____cacheline_aligned_in_smp;
	struct net_device *dev;
	struct xdp_umem *umem;
	struct list_head flush_node;
	struct xsk_buff_pool *pool;
	u16 queue_id;
	bool zc;
	enum {
		XSK_READY = 0,
		XSK_BOUND,
		XSK_UNBOUND,
	} state;

	struct xsk_queue *tx ____cacheline_aligned_in_smp;
	struct list_head tx_list;
	/* Protects generic receive. */
	spinlock_t rx_lock;

	/* Statistics */
	u64 rx_dropped;
	u64 rx_queue_full;

	struct list_head map_list;
	/* Protects map_list */
	spinlock_t map_list_lock;
	/* Protects multiple processes in the control path */
	struct mutex mutex;
	struct xsk_queue *fq_tmp; /* Only as tmp storage before bind */
	struct xsk_queue *cq_tmp; /* Only as tmp storage before bind */
};

#ifdef CONFIG_XDP_SOCKETS

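/* The three hooks below are the AF_XDP datapath entry points.  A rough
 * sketch of how the core uses them (illustrative only, based on the
 * XSKMAP redirect path, not a stable API contract):
 *
 *	// per frame, from xdp_do_redirect() on an XSKMAP:
 *	err = __xsk_map_redirect(xs, xdp);
 *	...
 *	// once at the end of the NAPI poll, from xdp_do_flush():
 *	__xsk_map_flush();
 *
 * __xsk_map_redirect() copies or queues the frame to the socket's RX
 * ring and adds the socket to a per-CPU flush list; __xsk_map_flush()
 * then publishes the new descriptors and notifies the socket.
 * xsk_generic_rcv() is the copy-mode receive path used when XDP runs
 * in generic (skb) mode.
 */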
int xsk_generic_rcv(struct xdp_sock *xs, struct xdp_buff *xdp);
int __xsk_map_redirect(struct xdp_sock *xs, struct xdp_buff *xdp);
void __xsk_map_flush(void);

#else

static inline int xsk_generic_rcv(struct xdp_sock *xs, struct xdp_buff *xdp)
{
	return -ENOTSUPP;
}

static inline int __xsk_map_redirect(struct xdp_sock *xs, struct xdp_buff *xdp)
{
	return -EOPNOTSUPP;
}

static inline void __xsk_map_flush(void)
{
}

#endif /* CONFIG_XDP_SOCKETS */

#endif /* _LINUX_XDP_SOCK_H */