/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */

/*
 * AF_XDP user-space access library.
 *
 * Copyright (c) 2018 - 2019 Intel Corporation.
 * Copyright (c) 2019 Facebook
 *
 * Author(s): Magnus Karlsson <magnus.karlsson@intel.com>
 */

#ifndef __LIBBPF_XSK_H
#define __LIBBPF_XSK_H

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>
#include <linux/if_xdp.h>

#include "libbpf.h"

#ifdef __cplusplus
extern "C" {
#endif

/* Load-Acquire Store-Release barriers used by the XDP socket
 * library. The following macros should *NOT* be considered part of
 * the xsk.h API and are subject to change at any time.
 *
 * LIBRARY INTERNAL
 */

#define __XSK_READ_ONCE(x) (*(volatile typeof(x) *)&x)
#define __XSK_WRITE_ONCE(x, v) (*(volatile typeof(x) *)&x) = (v)

#if defined(__i386__) || defined(__x86_64__)
# define libbpf_smp_store_release(p, v)					\
	do {								\
		asm volatile("" : : : "memory");			\
		__XSK_WRITE_ONCE(*p, v);				\
	} while (0)
# define libbpf_smp_load_acquire(p)					\
	({								\
		typeof(*p) ___p1 = __XSK_READ_ONCE(*p);			\
		asm volatile("" : : : "memory");			\
		___p1;							\
	})
#elif defined(__aarch64__)
# define libbpf_smp_store_release(p, v)					\
		asm volatile ("stlr %w1, %0" : "=Q" (*p) : "r" (v) : "memory")
# define libbpf_smp_load_acquire(p)					\
	({								\
		typeof(*p) ___p1;					\
		asm volatile ("ldar %w0, %1"				\
			      : "=r" (___p1) : "Q" (*p) : "memory");	\
		___p1;							\
	})
#elif defined(__riscv)
# define libbpf_smp_store_release(p, v)					\
	do {								\
		asm volatile ("fence rw,w" : : : "memory");		\
		__XSK_WRITE_ONCE(*p, v);				\
	} while (0)
# define libbpf_smp_load_acquire(p)					\
	({								\
		typeof(*p) ___p1 = __XSK_READ_ONCE(*p);			\
		asm volatile ("fence r,rw" : : : "memory");		\
		___p1;							\
	})
#endif

#ifndef libbpf_smp_store_release
#define libbpf_smp_store_release(p, v)					\
	do {								\
		__sync_synchronize();					\
		__XSK_WRITE_ONCE(*p, v);				\
	} while (0)
#endif

#ifndef libbpf_smp_load_acquire
#define libbpf_smp_load_acquire(p)					\
	({								\
		typeof(*p) ___p1 = __XSK_READ_ONCE(*p);			\
		__sync_synchronize();					\
		___p1;							\
	})
#endif

/* LIBRARY INTERNAL -- END */

/* Do not access these members directly. Use the functions below. */
#define DEFINE_XSK_RING(name) \
struct name { \
	__u32 cached_prod; \
	__u32 cached_cons; \
	__u32 mask; \
	__u32 size; \
	__u32 *producer; \
	__u32 *consumer; \
	void *ring; \
	__u32 *flags; \
}

DEFINE_XSK_RING(xsk_ring_prod);
DEFINE_XSK_RING(xsk_ring_cons);

/* For a detailed explanation on the memory barriers associated with the
 * ring, please take a look at net/xdp/xsk_queue.h.
 */

struct xsk_umem;
struct xsk_socket;

static inline __u64 *xsk_ring_prod__fill_addr(struct xsk_ring_prod *fill,
					      __u32 idx)
{
	__u64 *addrs = (__u64 *)fill->ring;

	return &addrs[idx & fill->mask];
}

static inline const __u64 *
xsk_ring_cons__comp_addr(const struct xsk_ring_cons *comp, __u32 idx)
{
	const __u64 *addrs = (const __u64 *)comp->ring;

	return &addrs[idx & comp->mask];
}

static inline struct xdp_desc *xsk_ring_prod__tx_desc(struct xsk_ring_prod *tx,
						      __u32 idx)
{
	struct xdp_desc *descs = (struct xdp_desc *)tx->ring;

	return &descs[idx & tx->mask];
}

static inline const struct xdp_desc *
xsk_ring_cons__rx_desc(const struct xsk_ring_cons *rx, __u32 idx)
{
	const struct xdp_desc *descs = (const struct xdp_desc *)rx->ring;

	return &descs[idx & rx->mask];
}

static inline int xsk_ring_prod__needs_wakeup(const struct xsk_ring_prod *r)
{
	return *r->flags & XDP_RING_NEED_WAKEUP;
}

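/* Example (illustrative sketch, not part of the xsk.h API): when the socket
 * is bound with XDP_USE_NEED_WAKEUP, the kernel only needs to be kicked when
 * it asks for it. "xsk_fd" below stands for the descriptor returned by
 * xsk_socket__fd(); <sys/socket.h> and <poll.h> are assumed to be included
 * by the caller.
 *
 *	static void kick_tx_if_needed(struct xsk_ring_prod *tx, int xsk_fd)
 *	{
 *		if (xsk_ring_prod__needs_wakeup(tx))
 *			sendto(xsk_fd, NULL, 0, MSG_DONTWAIT, NULL, 0);
 *	}
 *
 *	static void wait_for_fill_if_needed(struct xsk_ring_prod *fill, int xsk_fd)
 *	{
 *		struct pollfd pfd = { .fd = xsk_fd, .events = POLLIN };
 *
 *		if (xsk_ring_prod__needs_wakeup(fill))
 *			poll(&pfd, 1, -1);
 *	}
 */
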
static inline __u32 xsk_prod_nb_free(struct xsk_ring_prod *r, __u32 nb)
{
	__u32 free_entries = r->cached_cons - r->cached_prod;

	if (free_entries >= nb)
		return free_entries;

	/* Refresh the local tail pointer.
	 * cached_cons is r->size bigger than the real consumer pointer so
	 * that this addition can be avoided in the more frequently
	 * executed code that computes free_entries at the beginning of
	 * this function. Without this optimization it would have been
	 * free_entries = r->cached_cons - r->cached_prod + r->size.
	 */
	r->cached_cons = libbpf_smp_load_acquire(r->consumer);
	r->cached_cons += r->size;

	return r->cached_cons - r->cached_prod;
}

static inline __u32 xsk_cons_nb_avail(struct xsk_ring_cons *r, __u32 nb)
{
	__u32 entries = r->cached_prod - r->cached_cons;

	if (entries == 0) {
		r->cached_prod = libbpf_smp_load_acquire(r->producer);
		entries = r->cached_prod - r->cached_cons;
	}

	return (entries > nb) ? nb : entries;
}

static inline __u32 xsk_ring_prod__reserve(struct xsk_ring_prod *prod, __u32 nb, __u32 *idx)
{
	if (xsk_prod_nb_free(prod, nb) < nb)
		return 0;

	*idx = prod->cached_prod;
	prod->cached_prod += nb;

	return nb;
}

static inline void xsk_ring_prod__submit(struct xsk_ring_prod *prod, __u32 nb)
{
	/* Make sure everything has been written to the ring before indicating
	 * this to the kernel by writing the producer pointer.
	 */
	libbpf_smp_store_release(prod->producer, *prod->producer + nb);
}

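/* Example (illustrative sketch, not part of the xsk.h API): the producer
 * pattern is reserve, write the entries, then submit. The sketch below
 * refills the fill ring; "frame_addrs" and "nb_frames" are hypothetical
 * application-side names, not library symbols. If reserve() cannot grant
 * all requested slots it grants none, so the caller simply retries later.
 *
 *	static void refill_fill_ring(struct xsk_ring_prod *fill,
 *				     const __u64 *frame_addrs, __u32 nb_frames)
 *	{
 *		__u32 idx, i;
 *
 *		if (xsk_ring_prod__reserve(fill, nb_frames, &idx) != nb_frames)
 *			return;
 *
 *		for (i = 0; i < nb_frames; i++)
 *			*xsk_ring_prod__fill_addr(fill, idx + i) = frame_addrs[i];
 *
 *		xsk_ring_prod__submit(fill, nb_frames);
 *	}
 */
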
static inline __u32 xsk_ring_cons__peek(struct xsk_ring_cons *cons, __u32 nb, __u32 *idx)
{
	__u32 entries = xsk_cons_nb_avail(cons, nb);

	if (entries > 0) {
		*idx = cons->cached_cons;
		cons->cached_cons += entries;
	}

	return entries;
}

static inline void xsk_ring_cons__cancel(struct xsk_ring_cons *cons, __u32 nb)
{
	cons->cached_cons -= nb;
}

static inline void xsk_ring_cons__release(struct xsk_ring_cons *cons, __u32 nb)
{
	/* Make sure data has been read before indicating we are done
	 * with the entries by updating the consumer pointer.
	 */
	libbpf_smp_store_release(cons->consumer, *cons->consumer + nb);
}

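/* Example (illustrative sketch, not part of the xsk.h API): the consumer
 * pattern is peek, read the entries, then release. The sketch below drains
 * the RX ring assuming the default aligned UMEM mode; "handle_packet" is a
 * hypothetical application callback, not a library function.
 *
 *	static void drain_rx_ring(struct xsk_ring_cons *rx, void *umem_area)
 *	{
 *		__u32 idx, i;
 *		__u32 rcvd = xsk_ring_cons__peek(rx, 64, &idx);
 *
 *		for (i = 0; i < rcvd; i++) {
 *			const struct xdp_desc *desc =
 *				xsk_ring_cons__rx_desc(rx, idx + i);
 *			void *pkt = xsk_umem__get_data(umem_area, desc->addr);
 *
 *			handle_packet(pkt, desc->len);
 *		}
 *
 *		if (rcvd)
 *			xsk_ring_cons__release(rx, rcvd);
 *	}
 */
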
static inline void *xsk_umem__get_data(void *umem_area, __u64 addr)
{
	return &((char *)umem_area)[addr];
}

static inline __u64 xsk_umem__extract_addr(__u64 addr)
{
	return addr & XSK_UNALIGNED_BUF_ADDR_MASK;
}

static inline __u64 xsk_umem__extract_offset(__u64 addr)
{
	return addr >> XSK_UNALIGNED_BUF_OFFSET_SHIFT;
}

static inline __u64 xsk_umem__add_offset_to_addr(__u64 addr)
{
	return xsk_umem__extract_addr(addr) + xsk_umem__extract_offset(addr);
}

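/* Example (illustrative sketch, not part of the xsk.h API): with a UMEM
 * created with XDP_UMEM_UNALIGNED_CHUNK_FLAG, descriptor addresses encode a
 * base address and an offset, which must be folded back together before the
 * payload can be dereferenced.
 *
 *	static void *rx_desc_to_data(void *umem_area, const struct xdp_desc *desc)
 *	{
 *		__u64 addr = xsk_umem__add_offset_to_addr(desc->addr);
 *
 *		return xsk_umem__get_data(umem_area, addr);
 *	}
 */
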
LIBBPF_API int xsk_umem__fd(const struct xsk_umem *umem);
LIBBPF_API int xsk_socket__fd(const struct xsk_socket *xsk);

#define XSK_RING_CONS__DEFAULT_NUM_DESCS      2048
#define XSK_RING_PROD__DEFAULT_NUM_DESCS      2048
#define XSK_UMEM__DEFAULT_FRAME_SHIFT    12 /* 4096 bytes */
#define XSK_UMEM__DEFAULT_FRAME_SIZE     (1 << XSK_UMEM__DEFAULT_FRAME_SHIFT)
#define XSK_UMEM__DEFAULT_FRAME_HEADROOM 0
#define XSK_UMEM__DEFAULT_FLAGS 0

struct xsk_umem_config {
	__u32 fill_size;
	__u32 comp_size;
	__u32 frame_size;
	__u32 frame_headroom;
	__u32 flags;
};

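/* Example (illustrative sketch, not part of the xsk.h API): a configuration
 * equivalent to the built-in defaults. Passing NULL to xsk_umem__create()
 * gives the same result; fill in this struct only to deviate from it.
 *
 *	const struct xsk_umem_config umem_cfg = {
 *		.fill_size = XSK_RING_PROD__DEFAULT_NUM_DESCS,
 *		.comp_size = XSK_RING_CONS__DEFAULT_NUM_DESCS,
 *		.frame_size = XSK_UMEM__DEFAULT_FRAME_SIZE,
 *		.frame_headroom = XSK_UMEM__DEFAULT_FRAME_HEADROOM,
 *		.flags = XSK_UMEM__DEFAULT_FLAGS,
 *	};
 */
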
LIBBPF_API int xsk_setup_xdp_prog(int ifindex,
				  int *xsks_map_fd);
LIBBPF_API int xsk_socket__update_xskmap(struct xsk_socket *xsk,
					 int xsks_map_fd);

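/* Example (illustrative sketch, not part of the xsk.h API): when the default
 * XDP program is set up separately with xsk_setup_xdp_prog(), a socket
 * created with program loading inhibited still has to be inserted into the
 * returned XSKMAP before it receives traffic. "ifindex" and "xsk" are
 * assumed to exist in the caller.
 *
 *	int xsks_map_fd, err;
 *
 *	err = xsk_setup_xdp_prog(ifindex, &xsks_map_fd);
 *	if (!err)
 *		err = xsk_socket__update_xskmap(xsk, xsks_map_fd);
 */
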
/* Flags for the libbpf_flags field. */
#define XSK_LIBBPF_FLAGS__INHIBIT_PROG_LOAD (1 << 0)

struct xsk_socket_config {
	__u32 rx_size;
	__u32 tx_size;
	__u32 libbpf_flags;
	__u32 xdp_flags;
	__u16 bind_flags;
};

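/* Example (illustrative sketch, not part of the xsk.h API): a typical
 * configuration requesting driver-mode XDP with the need_wakeup feature.
 * XDP_FLAGS_DRV_MODE comes from <linux/if_link.h> and XDP_USE_NEED_WAKEUP
 * from <linux/if_xdp.h>; both are illustrative choices, not requirements.
 *
 *	const struct xsk_socket_config xsk_cfg = {
 *		.rx_size = XSK_RING_CONS__DEFAULT_NUM_DESCS,
 *		.tx_size = XSK_RING_PROD__DEFAULT_NUM_DESCS,
 *		.libbpf_flags = 0,
 *		.xdp_flags = XDP_FLAGS_DRV_MODE,
 *		.bind_flags = XDP_USE_NEED_WAKEUP,
 *	};
 */
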
/* Set config to NULL to get the default configuration. */
LIBBPF_API int xsk_umem__create(struct xsk_umem **umem,
				void *umem_area, __u64 size,
				struct xsk_ring_prod *fill,
				struct xsk_ring_cons *comp,
				const struct xsk_umem_config *config);
LIBBPF_API int xsk_umem__create_v0_0_2(struct xsk_umem **umem,
				       void *umem_area, __u64 size,
				       struct xsk_ring_prod *fill,
				       struct xsk_ring_cons *comp,
				       const struct xsk_umem_config *config);
LIBBPF_API int xsk_umem__create_v0_0_4(struct xsk_umem **umem,
				       void *umem_area, __u64 size,
				       struct xsk_ring_prod *fill,
				       struct xsk_ring_cons *comp,
				       const struct xsk_umem_config *config);
LIBBPF_API int xsk_socket__create(struct xsk_socket **xsk,
				  const char *ifname, __u32 queue_id,
				  struct xsk_umem *umem,
				  struct xsk_ring_cons *rx,
				  struct xsk_ring_prod *tx,
				  const struct xsk_socket_config *config);
LIBBPF_API int
xsk_socket__create_shared(struct xsk_socket **xsk_ptr,
			  const char *ifname,
			  __u32 queue_id, struct xsk_umem *umem,
			  struct xsk_ring_cons *rx,
			  struct xsk_ring_prod *tx,
			  struct xsk_ring_prod *fill,
			  struct xsk_ring_cons *comp,
			  const struct xsk_socket_config *config);

/* Returns 0 for success and -EBUSY if the umem is still in use. */
LIBBPF_API int xsk_umem__delete(struct xsk_umem *umem);
LIBBPF_API void xsk_socket__delete(struct xsk_socket *xsk);

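/* Example (illustrative sketch, not part of the xsk.h API): minimal bring-up
 * and tear-down of one socket on queue 0 of a hypothetical "eth0" interface.
 * NUM_FRAMES is an application-defined constant, <sys/mman.h> is assumed to
 * be included, and error handling is reduced to early returns.
 *
 *	struct xsk_ring_prod fill, tx;
 *	struct xsk_ring_cons comp, rx;
 *	struct xsk_umem *umem;
 *	struct xsk_socket *xsk;
 *	__u64 size = NUM_FRAMES * XSK_UMEM__DEFAULT_FRAME_SIZE;
 *	void *bufs;
 *
 *	bufs = mmap(NULL, size, PROT_READ | PROT_WRITE,
 *		    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *	if (bufs == MAP_FAILED)
 *		return -1;
 *
 *	if (xsk_umem__create(&umem, bufs, size, &fill, &comp, NULL))
 *		return -1;
 *	if (xsk_socket__create(&xsk, "eth0", 0, umem, &rx, &tx, NULL))
 *		return -1;
 *
 *	... use the rings, then tear down in reverse order ...
 *
 *	xsk_socket__delete(xsk);
 *	xsk_umem__delete(umem);
 *	munmap(bufs, size);
 */
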
#ifdef __cplusplus
} /* extern "C" */
#endif

#endif /* __LIBBPF_XSK_H */