/* SPDX-License-Identifier: GPL-2.0 */
/* Interface for implementing AF_XDP zero-copy support in drivers.
 * Copyright(c) 2020 Intel Corporation.
 */

#ifndef _LINUX_XDP_SOCK_DRV_H
#define _LINUX_XDP_SOCK_DRV_H

#include <net/xdp_sock.h>
#include <net/xsk_buff_pool.h>

#define XDP_UMEM_MIN_CHUNK_SHIFT 11
#define XDP_UMEM_MIN_CHUNK_SIZE (1 << XDP_UMEM_MIN_CHUNK_SHIFT)

struct xsk_cb_desc {
	void *src;
	u8 off;
	u8 bytes;
};

#ifdef CONFIG_XDP_SOCKETS

void xsk_tx_completed(struct xsk_buff_pool *pool, u32 nb_entries);
bool xsk_tx_peek_desc(struct xsk_buff_pool *pool, struct xdp_desc *desc);
u32 xsk_tx_peek_release_desc_batch(struct xsk_buff_pool *pool, u32 max);
void xsk_tx_release(struct xsk_buff_pool *pool);
struct xsk_buff_pool *xsk_get_pool_from_qid(struct net_device *dev,
					    u16 queue_id);
void xsk_set_rx_need_wakeup(struct xsk_buff_pool *pool);
void xsk_set_tx_need_wakeup(struct xsk_buff_pool *pool);
void xsk_clear_rx_need_wakeup(struct xsk_buff_pool *pool);
void xsk_clear_tx_need_wakeup(struct xsk_buff_pool *pool);
bool xsk_uses_need_wakeup(struct xsk_buff_pool *pool);

static inline u32 xsk_pool_get_headroom(struct xsk_buff_pool *pool)
{
	return XDP_PACKET_HEADROOM + pool->headroom;
}

static inline u32 xsk_pool_get_chunk_size(struct xsk_buff_pool *pool)
{
	return pool->chunk_size;
}

static inline u32 xsk_pool_get_rx_frame_size(struct xsk_buff_pool *pool)
{
	return xsk_pool_get_chunk_size(pool) - xsk_pool_get_headroom(pool);
}

static inline void xsk_pool_set_rxq_info(struct xsk_buff_pool *pool,
					 struct xdp_rxq_info *rxq)
{
	xp_set_rxq_info(pool, rxq);
}

static inline void xsk_pool_fill_cb(struct xsk_buff_pool *pool,
				    struct xsk_cb_desc *desc)
{
	xp_fill_cb(pool, desc);
}

static inline void xsk_pool_dma_unmap(struct xsk_buff_pool *pool,
				      unsigned long attrs)
{
	xp_dma_unmap(pool, attrs);
}

static inline int xsk_pool_dma_map(struct xsk_buff_pool *pool,
				   struct device *dev, unsigned long attrs)
{
	struct xdp_umem *umem = pool->umem;

	return xp_dma_map(pool, dev, attrs, umem->pgs, umem->npgs);
}

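/* Example (illustrative sketch, not part of the upstream header): a driver
 * would typically call xsk_pool_dma_map() when a zero-copy queue is enabled
 * and xsk_pool_dma_unmap() when it is torn down. The mydrv_* names and the
 * ring layout below are hypothetical.
 */
#if 0
static int mydrv_xsk_pool_enable(struct mydrv_ring *ring,
				 struct xsk_buff_pool *pool)
{
	/* Map every umem page for DMA on this queue's device. */
	return xsk_pool_dma_map(pool, ring->dev, DMA_ATTR_SKIP_CPU_SYNC);
}

static void mydrv_xsk_pool_disable(struct xsk_buff_pool *pool)
{
	xsk_pool_dma_unmap(pool, DMA_ATTR_SKIP_CPU_SYNC);
}
#endif
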
static inline dma_addr_t xsk_buff_xdp_get_dma(struct xdp_buff *xdp)
{
	struct xdp_buff_xsk *xskb = container_of(xdp, struct xdp_buff_xsk, xdp);

	return xp_get_dma(xskb);
}

static inline dma_addr_t xsk_buff_xdp_get_frame_dma(struct xdp_buff *xdp)
{
	struct xdp_buff_xsk *xskb = container_of(xdp, struct xdp_buff_xsk, xdp);

	return xp_get_frame_dma(xskb);
}

static inline struct xdp_buff *xsk_buff_alloc(struct xsk_buff_pool *pool)
{
	return xp_alloc(pool);
}

static inline bool xsk_is_eop_desc(struct xdp_desc *desc)
{
	return !xp_mb_desc(desc);
}

/* Returns as many entries as possible up to max. 0 <= N <= max. */
static inline u32 xsk_buff_alloc_batch(struct xsk_buff_pool *pool, struct xdp_buff **xdp, u32 max)
{
	return xp_alloc_batch(pool, xdp, max);
}

static inline bool xsk_buff_can_alloc(struct xsk_buff_pool *pool, u32 count)
{
	return xp_can_alloc(pool, count);
}

static inline void xsk_buff_free(struct xdp_buff *xdp)
{
	struct xdp_buff_xsk *xskb = container_of(xdp, struct xdp_buff_xsk, xdp);
	struct list_head *xskb_list = &xskb->pool->xskb_list;
	struct xdp_buff_xsk *pos, *tmp;

	if (likely(!xdp_buff_has_frags(xdp)))
		goto out;

	list_for_each_entry_safe(pos, tmp, xskb_list, list_node) {
		list_del(&pos->list_node);
		xp_free(pos);
	}

	xdp_get_shared_info_from_buff(xdp)->nr_frags = 0;
out:
	xp_free(xskb);
}

static inline void xsk_buff_add_frag(struct xdp_buff *xdp)
{
	struct xdp_buff_xsk *frag = container_of(xdp, struct xdp_buff_xsk, xdp);

	list_add_tail(&frag->list_node, &frag->pool->xskb_list);
}

static inline struct xdp_buff *xsk_buff_get_frag(struct xdp_buff *first)
{
	struct xdp_buff_xsk *xskb = container_of(first, struct xdp_buff_xsk, xdp);
	struct xdp_buff *ret = NULL;
	struct xdp_buff_xsk *frag;

	frag = list_first_entry_or_null(&xskb->pool->xskb_list,
					struct xdp_buff_xsk, list_node);
	if (frag) {
		list_del(&frag->list_node);
		ret = &frag->xdp;
	}

	return ret;
}

static inline void xsk_buff_del_tail(struct xdp_buff *tail)
{
	struct xdp_buff_xsk *xskb = container_of(tail, struct xdp_buff_xsk, xdp);

	list_del(&xskb->list_node);
}

static inline struct xdp_buff *xsk_buff_get_tail(struct xdp_buff *first)
{
	struct xdp_buff_xsk *xskb = container_of(first, struct xdp_buff_xsk, xdp);
	struct xdp_buff_xsk *frag;

	frag = list_last_entry(&xskb->pool->xskb_list, struct xdp_buff_xsk,
			       list_node);
	return &frag->xdp;
}

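/* Example (illustrative sketch, not part of the upstream header, modelled on
 * how zero-copy drivers handle multi-buffer frames): each non-EOP buffer is
 * recorded in the head buffer's shared_info and queued on the pool's
 * xskb_list via xsk_buff_add_frag(), so that xsk_buff_free() on the head can
 * later release the whole chain. The mydrv_* name is hypothetical.
 */
#if 0
static int mydrv_add_xsk_frag(struct xdp_buff *first, struct xdp_buff *frag,
			      u32 size)
{
	struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(first);

	if (!xdp_buff_has_frags(first)) {
		sinfo->nr_frags = 0;
		sinfo->xdp_frags_size = 0;
		xdp_buff_set_frags_flag(first);
	}

	if (unlikely(sinfo->nr_frags == MAX_SKB_FRAGS)) {
		xsk_buff_free(first);
		return -ENOMEM;
	}

	__skb_fill_page_desc_noacc(sinfo, sinfo->nr_frags++,
				   virt_to_page(frag->data_hard_start),
				   XDP_PACKET_HEADROOM, size);
	sinfo->xdp_frags_size += size;
	xsk_buff_add_frag(frag);

	return 0;
}
#endif
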
static inline void xsk_buff_set_size(struct xdp_buff *xdp, u32 size)
{
	xdp->data = xdp->data_hard_start + XDP_PACKET_HEADROOM;
	xdp->data_meta = xdp->data;
	xdp->data_end = xdp->data + size;
	xdp->flags = 0;
}

static inline dma_addr_t xsk_buff_raw_get_dma(struct xsk_buff_pool *pool,
					      u64 addr)
{
	return xp_raw_get_dma(pool, addr);
}

static inline void *xsk_buff_raw_get_data(struct xsk_buff_pool *pool, u64 addr)
{
	return xp_raw_get_data(pool, addr);
}

#define XDP_TXMD_FLAGS_VALID ( \
		XDP_TXMD_FLAGS_TIMESTAMP | \
		XDP_TXMD_FLAGS_CHECKSUM | \
	0)

static inline bool xsk_buff_valid_tx_metadata(struct xsk_tx_metadata *meta)
{
	return !(meta->flags & ~XDP_TXMD_FLAGS_VALID);
}

static inline struct xsk_tx_metadata *xsk_buff_get_metadata(struct xsk_buff_pool *pool, u64 addr)
{
	struct xsk_tx_metadata *meta;

	if (!pool->tx_metadata_len)
		return NULL;

	meta = xp_raw_get_data(pool, addr) - pool->tx_metadata_len;
	if (unlikely(!xsk_buff_valid_tx_metadata(meta)))
		return NULL; /* no way to signal the error to the user */

	return meta;
}

static inline void xsk_buff_dma_sync_for_cpu(struct xdp_buff *xdp)
{
	struct xdp_buff_xsk *xskb = container_of(xdp, struct xdp_buff_xsk, xdp);

	xp_dma_sync_for_cpu(xskb);
}

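/* Example (illustrative sketch, not part of the upstream header): a typical
 * zero-copy RX path allocates buffers in bulk, programs their DMA addresses
 * into the hardware ring (keeping the xdp_buff pointers for completion), and
 * on completion sizes the frame and syncs it for the CPU before running the
 * XDP program. All mydrv_* names are hypothetical.
 */
#if 0
static u32 mydrv_rx_refill(struct mydrv_ring *ring, u32 budget)
{
	struct xdp_buff *bufs[64];
	u32 i, n;

	n = xsk_buff_alloc_batch(ring->pool, bufs, min(budget, 64U));
	for (i = 0; i < n; i++) {
		dma_addr_t dma = xsk_buff_xdp_get_dma(bufs[i]);

		/* write dma into the next hardware RX descriptor and
		 * remember bufs[i] for the completion path ...
		 */
	}
	return n;
}

static void mydrv_rx_complete(struct xdp_buff *xdp, u32 len)
{
	xsk_buff_set_size(xdp, len);
	xsk_buff_dma_sync_for_cpu(xdp);
	/* run the XDP program on xdp; on XDP_DROP or redirect failure,
	 * return the buffer to the pool with xsk_buff_free(xdp).
	 */
}
#endif
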
static inline void xsk_buff_raw_dma_sync_for_device(struct xsk_buff_pool *pool,
						    dma_addr_t dma,
						    size_t size)
{
	xp_dma_sync_for_device(pool, dma, size);
}

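/* Example (illustrative sketch, not part of the upstream header): a typical
 * zero-copy TX path peeks descriptors from the Tx ring, resolves their DMA
 * addresses and optional metadata, syncs for the device and posts them to
 * hardware; completed descriptors are later returned with xsk_tx_completed().
 * All mydrv_* names are hypothetical.
 */
#if 0
static bool mydrv_xmit_one(struct mydrv_ring *ring)
{
	struct xsk_buff_pool *pool = ring->pool;
	struct xsk_tx_metadata *meta;
	struct xdp_desc desc;
	dma_addr_t dma;

	if (!xsk_tx_peek_desc(pool, &desc))
		return false;

	dma = xsk_buff_raw_get_dma(pool, desc.addr);
	xsk_buff_raw_dma_sync_for_device(pool, dma, desc.len);

	/* NULL unless the socket was created with TX metadata enabled. */
	meta = xsk_buff_get_metadata(pool, desc.addr);
	if (meta && (meta->flags & XDP_TXMD_FLAGS_TIMESTAMP)) {
		/* request a hardware TX timestamp for this frame ... */
	}

	/* post dma/desc.len to the hardware TX ring, then commit the
	 * consumed descriptors with xsk_tx_release(pool).
	 */
	return true;
}
#endif
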
#else

static inline void xsk_tx_completed(struct xsk_buff_pool *pool, u32 nb_entries)
{
}

static inline bool xsk_tx_peek_desc(struct xsk_buff_pool *pool,
				    struct xdp_desc *desc)
{
	return false;
}

static inline u32 xsk_tx_peek_release_desc_batch(struct xsk_buff_pool *pool, u32 max)
{
	return 0;
}

static inline void xsk_tx_release(struct xsk_buff_pool *pool)
{
}

static inline struct xsk_buff_pool *
xsk_get_pool_from_qid(struct net_device *dev, u16 queue_id)
{
	return NULL;
}

static inline void xsk_set_rx_need_wakeup(struct xsk_buff_pool *pool)
{
}

static inline void xsk_set_tx_need_wakeup(struct xsk_buff_pool *pool)
{
}

static inline void xsk_clear_rx_need_wakeup(struct xsk_buff_pool *pool)
{
}

static inline void xsk_clear_tx_need_wakeup(struct xsk_buff_pool *pool)
{
}

static inline bool xsk_uses_need_wakeup(struct xsk_buff_pool *pool)
{
	return false;
}

static inline u32 xsk_pool_get_headroom(struct xsk_buff_pool *pool)
{
	return 0;
}

static inline u32 xsk_pool_get_chunk_size(struct xsk_buff_pool *pool)
{
	return 0;
}

static inline u32 xsk_pool_get_rx_frame_size(struct xsk_buff_pool *pool)
{
	return 0;
}

static inline void xsk_pool_set_rxq_info(struct xsk_buff_pool *pool,
					 struct xdp_rxq_info *rxq)
{
}

static inline void xsk_pool_fill_cb(struct xsk_buff_pool *pool,
				    struct xsk_cb_desc *desc)
{
}

static inline void xsk_pool_dma_unmap(struct xsk_buff_pool *pool,
				      unsigned long attrs)
{
}

static inline int xsk_pool_dma_map(struct xsk_buff_pool *pool,
				   struct device *dev, unsigned long attrs)
{
	return 0;
}

static inline dma_addr_t xsk_buff_xdp_get_dma(struct xdp_buff *xdp)
{
	return 0;
}

static inline dma_addr_t xsk_buff_xdp_get_frame_dma(struct xdp_buff *xdp)
{
	return 0;
}

static inline struct xdp_buff *xsk_buff_alloc(struct xsk_buff_pool *pool)
{
	return NULL;
}

static inline bool xsk_is_eop_desc(struct xdp_desc *desc)
{
	return false;
}

static inline u32 xsk_buff_alloc_batch(struct xsk_buff_pool *pool, struct xdp_buff **xdp, u32 max)
{
	return 0;
}

static inline bool xsk_buff_can_alloc(struct xsk_buff_pool *pool, u32 count)
{
	return false;
}

static inline void xsk_buff_free(struct xdp_buff *xdp)
{
}

static inline void xsk_buff_add_frag(struct xdp_buff *xdp)
{
}

static inline struct xdp_buff *xsk_buff_get_frag(struct xdp_buff *first)
{
	return NULL;
}

static inline void xsk_buff_del_tail(struct xdp_buff *tail)
{
}

static inline struct xdp_buff *xsk_buff_get_tail(struct xdp_buff *first)
{
	return NULL;
}

static inline void xsk_buff_set_size(struct xdp_buff *xdp, u32 size)
{
}

static inline dma_addr_t xsk_buff_raw_get_dma(struct xsk_buff_pool *pool,
					      u64 addr)
{
	return 0;
}

static inline void *xsk_buff_raw_get_data(struct xsk_buff_pool *pool, u64 addr)
{
	return NULL;
}

static inline bool xsk_buff_valid_tx_metadata(struct xsk_tx_metadata *meta)
{
	return false;
}

static inline struct xsk_tx_metadata *xsk_buff_get_metadata(struct xsk_buff_pool *pool, u64 addr)
{
	return NULL;
}

static inline void xsk_buff_dma_sync_for_cpu(struct xdp_buff *xdp)
{
}

static inline void xsk_buff_raw_dma_sync_for_device(struct xsk_buff_pool *pool,
						    dma_addr_t dma,
						    size_t size)
{
}

#endif /* CONFIG_XDP_SOCKETS */

#endif /* _LINUX_XDP_SOCK_DRV_H */