/* SPDX-License-Identifier: GPL-2.0 */
/* Interface for implementing AF_XDP zero-copy support in drivers.
 * Copyright(c) 2020 Intel Corporation.
 */

#ifndef _LINUX_XDP_SOCK_DRV_H
#define _LINUX_XDP_SOCK_DRV_H

#include <net/xdp_sock.h>
#include <net/xsk_buff_pool.h>

#define XDP_UMEM_MIN_CHUNK_SHIFT 11
#define XDP_UMEM_MIN_CHUNK_SIZE (1 << XDP_UMEM_MIN_CHUNK_SHIFT)

struct xsk_cb_desc {
	void *src;
	u8 off;
	u8 bytes;
};

#ifdef CONFIG_XDP_SOCKETS

void xsk_tx_completed(struct xsk_buff_pool *pool, u32 nb_entries);
bool xsk_tx_peek_desc(struct xsk_buff_pool *pool, struct xdp_desc *desc);
u32 xsk_tx_peek_release_desc_batch(struct xsk_buff_pool *pool, u32 max);
void xsk_tx_release(struct xsk_buff_pool *pool);
struct xsk_buff_pool *xsk_get_pool_from_qid(struct net_device *dev,
					    u16 queue_id);
void xsk_set_rx_need_wakeup(struct xsk_buff_pool *pool);
void xsk_set_tx_need_wakeup(struct xsk_buff_pool *pool);
void xsk_clear_rx_need_wakeup(struct xsk_buff_pool *pool);
void xsk_clear_tx_need_wakeup(struct xsk_buff_pool *pool);
bool xsk_uses_need_wakeup(struct xsk_buff_pool *pool);

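/* Example: the need_wakeup protocol from a driver's point of view. When the
 * feature is on, the driver must flag to user space whether it needs a kick
 * (via sendmsg()/poll()) to make progress. A minimal sketch of the tail of a
 * zero-copy RX poll; mydrv_* names are hypothetical, not part of this API.
 *
 *	static int mydrv_poll_rx_zc(struct mydrv_ring *ring, int budget)
 *	{
 *		int done = mydrv_clean_rx_irq_zc(ring, budget);
 *
 *		if (xsk_uses_need_wakeup(ring->xsk_pool)) {
 *			if (mydrv_fill_ring_is_empty(ring))
 *				xsk_set_rx_need_wakeup(ring->xsk_pool);
 *			else
 *				xsk_clear_rx_need_wakeup(ring->xsk_pool);
 *		}
 *		return done;
 *	}
 */
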
static inline u32 xsk_pool_get_headroom(struct xsk_buff_pool *pool)
{
	return XDP_PACKET_HEADROOM + pool->headroom;
}

static inline u32 xsk_pool_get_chunk_size(struct xsk_buff_pool *pool)
{
	return pool->chunk_size;
}

static inline u32 xsk_pool_get_rx_frame_size(struct xsk_buff_pool *pool)
{
	return xsk_pool_get_chunk_size(pool) - xsk_pool_get_headroom(pool);
}

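/* Example: a driver would typically program its HW RX buffer length from the
 * value above, so a received frame plus headroom fits in one chunk. A
 * one-line sketch; the ring fields are hypothetical.
 *
 *	ring->rx_buf_len = xsk_pool_get_rx_frame_size(ring->xsk_pool);
 */
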
static inline void xsk_pool_set_rxq_info(struct xsk_buff_pool *pool,
					 struct xdp_rxq_info *rxq)
{
	xp_set_rxq_info(pool, rxq);
}

static inline void xsk_pool_fill_cb(struct xsk_buff_pool *pool,
				    struct xsk_cb_desc *desc)
{
	xp_fill_cb(pool, desc);
}

static inline void xsk_pool_dma_unmap(struct xsk_buff_pool *pool,
				      unsigned long attrs)
{
	xp_dma_unmap(pool, attrs);
}

static inline int xsk_pool_dma_map(struct xsk_buff_pool *pool,
				   struct device *dev, unsigned long attrs)
{
	struct xdp_umem *umem = pool->umem;

	return xp_dma_map(pool, dev, attrs, umem->pgs, umem->npgs);
}

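/* Example: mapping the pool when a socket is bound to a queue, e.g. from an
 * XDP_SETUP_XSK_POOL handler in .ndo_bpf. A minimal sketch, assuming the
 * queue has already been quiesced; mydrv_* names are hypothetical.
 *
 *	static int mydrv_xsk_pool_enable(struct mydrv_priv *priv,
 *					 struct xsk_buff_pool *pool, u16 qid)
 *	{
 *		int err;
 *
 *		err = xsk_pool_dma_map(pool, priv->dma_dev,
 *				       DMA_ATTR_SKIP_CPU_SYNC);
 *		if (err)
 *			return err;
 *
 *		xsk_pool_set_rxq_info(pool, &priv->rx_ring[qid]->xdp_rxq);
 *		return mydrv_queue_restart(priv, qid);
 *	}
 */
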
static inline dma_addr_t xsk_buff_xdp_get_dma(struct xdp_buff *xdp)
{
	struct xdp_buff_xsk *xskb = container_of(xdp, struct xdp_buff_xsk, xdp);

	return xp_get_dma(xskb);
}

static inline dma_addr_t xsk_buff_xdp_get_frame_dma(struct xdp_buff *xdp)
{
	struct xdp_buff_xsk *xskb = container_of(xdp, struct xdp_buff_xsk, xdp);

	return xp_get_frame_dma(xskb);
}

static inline struct xdp_buff *xsk_buff_alloc(struct xsk_buff_pool *pool)
{
	return xp_alloc(pool);
}

static inline bool xsk_is_eop_desc(struct xdp_desc *desc)
{
	return !xp_mb_desc(desc);
}

/* Returns as many entries as possible up to max. 0 <= N <= max. */
static inline u32 xsk_buff_alloc_batch(struct xsk_buff_pool *pool, struct xdp_buff **xdp, u32 max)
{
	return xp_alloc_batch(pool, xdp, max);
}

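/* Example: batched refill of an RX descriptor ring. A minimal sketch,
 * assuming a driver-owned xdp_buff pointer array mirroring the ring and
 * ignoring ring wrap; mydrv_* names and the descriptor layout are
 * hypothetical.
 *
 *	static u32 mydrv_refill_rx_zc(struct mydrv_ring *ring, u32 count)
 *	{
 *		u32 i, nb_buffs;
 *
 *		nb_buffs = xsk_buff_alloc_batch(ring->xsk_pool,
 *						&ring->xdp_bufs[ring->next],
 *						count);
 *		for (i = 0; i < nb_buffs; i++) {
 *			dma_addr_t dma;
 *
 *			dma = xsk_buff_xdp_get_dma(ring->xdp_bufs[ring->next + i]);
 *			ring->rx_desc[ring->next + i].addr = cpu_to_le64(dma);
 *		}
 *		return nb_buffs;	// may be < count; retry on next poll
 *	}
 */
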
static inline bool xsk_buff_can_alloc(struct xsk_buff_pool *pool, u32 count)
{
	return xp_can_alloc(pool, count);
}

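/* Freeing the head buffer of a multi-buffer frame also returns every
 * fragment that was linked in with xsk_buff_add_frag(): the pool's
 * xskb_list is drained and the shared info frag count is reset, so a
 * driver only ever frees the head xdp_buff.
 */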
static inline void xsk_buff_free(struct xdp_buff *xdp)
{
	struct xdp_buff_xsk *xskb = container_of(xdp, struct xdp_buff_xsk, xdp);
	struct list_head *xskb_list = &xskb->pool->xskb_list;
	struct xdp_buff_xsk *pos, *tmp;

	if (likely(!xdp_buff_has_frags(xdp)))
		goto out;

	list_for_each_entry_safe(pos, tmp, xskb_list, list_node) {
		list_del(&pos->list_node);
		xp_free(pos);
	}

	xdp_get_shared_info_from_buff(xdp)->nr_frags = 0;
out:
	xp_free(xskb);
}

static inline void xsk_buff_add_frag(struct xdp_buff *xdp)
{
	struct xdp_buff_xsk *frag = container_of(xdp, struct xdp_buff_xsk, xdp);

	list_add_tail(&frag->list_node, &frag->pool->xskb_list);
}

static inline struct xdp_buff *xsk_buff_get_frag(struct xdp_buff *first)
{
	struct xdp_buff_xsk *xskb = container_of(first, struct xdp_buff_xsk, xdp);
	struct xdp_buff *ret = NULL;
	struct xdp_buff_xsk *frag;

	frag = list_first_entry_or_null(&xskb->pool->xskb_list,
					struct xdp_buff_xsk, list_node);
	if (frag) {
		list_del(&frag->list_node);
		ret = &frag->xdp;
	}

	return ret;
}

static inline void xsk_buff_del_tail(struct xdp_buff *tail)
{
	struct xdp_buff_xsk *xskb = container_of(tail, struct xdp_buff_xsk, xdp);

	list_del(&xskb->list_node);
}

static inline struct xdp_buff *xsk_buff_get_tail(struct xdp_buff *first)
{
	struct xdp_buff_xsk *xskb = container_of(first, struct xdp_buff_xsk, xdp);
	struct xdp_buff_xsk *frag;

	frag = list_last_entry(&xskb->pool->xskb_list, struct xdp_buff_xsk,
			       list_node);
	return &frag->xdp;
}

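/* Example: gathering a multi-buffer frame on RX. Non-EOP fragments are
 * linked onto the pool's xskb_list with xsk_buff_add_frag() and travel with
 * the head until xsk_buff_free(), or until the driver picks them off with
 * xsk_buff_get_frag(). A heavily trimmed sketch; skb_shared_info frag
 * accounting is omitted and the mydrv_/hw_desc_ names are hypothetical.
 *
 *	xsk_buff_set_size(xdp, hw_desc_len(rx_desc));
 *	if (xdp != first) {
 *		if (!xdp_buff_has_frags(first))
 *			xdp_buff_set_frags_flag(first);
 *		xsk_buff_add_frag(xdp);
 *	}
 *	if (hw_desc_is_eop(rx_desc))
 *		mydrv_run_xdp_zc(ring, first);	// frags follow the head
 */
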
static inline void xsk_buff_set_size(struct xdp_buff *xdp, u32 size)
{
	xdp->data = xdp->data_hard_start + XDP_PACKET_HEADROOM;
	xdp->data_meta = xdp->data;
	xdp->data_end = xdp->data + size;
	xdp->flags = 0;
}

static inline dma_addr_t xsk_buff_raw_get_dma(struct xsk_buff_pool *pool,
					      u64 addr)
{
	return xp_raw_get_dma(pool, addr);
}

static inline void *xsk_buff_raw_get_data(struct xsk_buff_pool *pool, u64 addr)
{
	return xp_raw_get_data(pool, addr);
}

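/* Example: a zero-copy transmit loop. Descriptors are peeked from the TX
 * ring, translated to device addresses with xsk_buff_raw_get_dma(), synced,
 * and posted to HW; the completion interrupt later hands the entries back
 * with xsk_tx_completed(). A minimal sketch; mydrv_* names are hypothetical.
 *
 *	static void mydrv_xmit_zc(struct mydrv_ring *ring, int budget)
 *	{
 *		struct xsk_buff_pool *pool = ring->xsk_pool;
 *		struct xdp_desc desc;
 *
 *		while (budget-- && xsk_tx_peek_desc(pool, &desc)) {
 *			dma_addr_t dma = xsk_buff_raw_get_dma(pool, desc.addr);
 *
 *			xsk_buff_raw_dma_sync_for_device(pool, dma, desc.len);
 *			mydrv_post_tx_desc(ring, dma, desc.len);
 *		}
 *		xsk_tx_release(pool);		// commit the peeked descriptors
 *		mydrv_kick_hw(ring);
 *	}
 */
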
#define XDP_TXMD_FLAGS_VALID ( \
		XDP_TXMD_FLAGS_TIMESTAMP | \
		XDP_TXMD_FLAGS_CHECKSUM | \
	0)

static inline bool xsk_buff_valid_tx_metadata(struct xsk_tx_metadata *meta)
{
	return !(meta->flags & ~XDP_TXMD_FLAGS_VALID);
}

static inline struct xsk_tx_metadata *xsk_buff_get_metadata(struct xsk_buff_pool *pool, u64 addr)
{
	struct xsk_tx_metadata *meta;

	if (!pool->tx_metadata_len)
		return NULL;

	meta = xp_raw_get_data(pool, addr) - pool->tx_metadata_len;
	if (unlikely(!xsk_buff_valid_tx_metadata(meta)))
		return NULL; /* no way to signal the error to the user */

	return meta;
}

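/* Example: acting on a TX metadata request. A minimal sketch for a device
 * that can offload checksums; the completion side (e.g. timestamps via
 * xsk_tx_metadata_to_compl()/xsk_tx_metadata_complete()) is driver specific
 * and omitted here. mydrv_* names are hypothetical.
 *
 *	meta = xsk_buff_get_metadata(pool, desc.addr);
 *	if (meta && (meta->flags & XDP_TXMD_FLAGS_CHECKSUM))
 *		mydrv_tx_csum_offload(tx_desc,
 *				      meta->request.csum_start,
 *				      meta->request.csum_offset);
 */
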
static inline void xsk_buff_dma_sync_for_cpu(struct xdp_buff *xdp)
{
	struct xdp_buff_xsk *xskb = container_of(xdp, struct xdp_buff_xsk, xdp);

	xp_dma_sync_for_cpu(xskb);
}

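/* Example: per-packet RX handling. The buffer is sized, synced for the CPU,
 * and run through the XDP program; on XDP_REDIRECT it reaches the socket's
 * RX ring, on drop it goes straight back to the pool. A minimal sketch with
 * error handling omitted; mydrv_* names are hypothetical.
 *
 *	struct xdp_buff *xdp = ring->xdp_bufs[ntc];
 *	u32 act;
 *
 *	xsk_buff_set_size(xdp, le16_to_cpu(rx_desc->len));
 *	xsk_buff_dma_sync_for_cpu(xdp);
 *	act = bpf_prog_run_xdp(ring->xdp_prog, xdp);
 *	if (act == XDP_REDIRECT)
 *		xdp_do_redirect(ring->netdev, xdp, ring->xdp_prog);
 *	else if (act == XDP_DROP)
 *		xsk_buff_free(xdp);
 */
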
static inline void xsk_buff_raw_dma_sync_for_device(struct xsk_buff_pool *pool,
						    dma_addr_t dma,
						    size_t size)
{
	xp_dma_sync_for_device(pool, dma, size);
}

#else

static inline void xsk_tx_completed(struct xsk_buff_pool *pool, u32 nb_entries)
{
}

static inline bool xsk_tx_peek_desc(struct xsk_buff_pool *pool,
				    struct xdp_desc *desc)
{
	return false;
}

static inline u32 xsk_tx_peek_release_desc_batch(struct xsk_buff_pool *pool, u32 max)
{
	return 0;
}

static inline void xsk_tx_release(struct xsk_buff_pool *pool)
{
}

static inline struct xsk_buff_pool *
xsk_get_pool_from_qid(struct net_device *dev, u16 queue_id)
{
	return NULL;
}

static inline void xsk_set_rx_need_wakeup(struct xsk_buff_pool *pool)
{
}

static inline void xsk_set_tx_need_wakeup(struct xsk_buff_pool *pool)
{
}

static inline void xsk_clear_rx_need_wakeup(struct xsk_buff_pool *pool)
{
}

static inline void xsk_clear_tx_need_wakeup(struct xsk_buff_pool *pool)
{
}

static inline bool xsk_uses_need_wakeup(struct xsk_buff_pool *pool)
{
	return false;
}

static inline u32 xsk_pool_get_headroom(struct xsk_buff_pool *pool)
{
	return 0;
}

static inline u32 xsk_pool_get_chunk_size(struct xsk_buff_pool *pool)
{
	return 0;
}

static inline u32 xsk_pool_get_rx_frame_size(struct xsk_buff_pool *pool)
{
	return 0;
}

static inline void xsk_pool_set_rxq_info(struct xsk_buff_pool *pool,
					 struct xdp_rxq_info *rxq)
{
}

static inline void xsk_pool_fill_cb(struct xsk_buff_pool *pool,
				    struct xsk_cb_desc *desc)
{
}

static inline void xsk_pool_dma_unmap(struct xsk_buff_pool *pool,
				      unsigned long attrs)
{
}

static inline int xsk_pool_dma_map(struct xsk_buff_pool *pool,
				   struct device *dev, unsigned long attrs)
{
	return 0;
}

static inline dma_addr_t xsk_buff_xdp_get_dma(struct xdp_buff *xdp)
{
	return 0;
}

static inline dma_addr_t xsk_buff_xdp_get_frame_dma(struct xdp_buff *xdp)
{
	return 0;
}

static inline struct xdp_buff *xsk_buff_alloc(struct xsk_buff_pool *pool)
{
	return NULL;
}

static inline bool xsk_is_eop_desc(struct xdp_desc *desc)
{
	return false;
}

static inline u32 xsk_buff_alloc_batch(struct xsk_buff_pool *pool, struct xdp_buff **xdp, u32 max)
{
	return 0;
}

static inline bool xsk_buff_can_alloc(struct xsk_buff_pool *pool, u32 count)
{
	return false;
}

static inline void xsk_buff_free(struct xdp_buff *xdp)
{
}

static inline void xsk_buff_add_frag(struct xdp_buff *xdp)
{
}

static inline struct xdp_buff *xsk_buff_get_frag(struct xdp_buff *first)
{
	return NULL;
}

static inline void xsk_buff_del_tail(struct xdp_buff *tail)
{
}

static inline struct xdp_buff *xsk_buff_get_tail(struct xdp_buff *first)
{
	return NULL;
}

static inline void xsk_buff_set_size(struct xdp_buff *xdp, u32 size)
{
}

static inline dma_addr_t xsk_buff_raw_get_dma(struct xsk_buff_pool *pool,
					      u64 addr)
{
	return 0;
}

static inline void *xsk_buff_raw_get_data(struct xsk_buff_pool *pool, u64 addr)
{
	return NULL;
}

static inline bool xsk_buff_valid_tx_metadata(struct xsk_tx_metadata *meta)
{
	return false;
}

static inline struct xsk_tx_metadata *xsk_buff_get_metadata(struct xsk_buff_pool *pool, u64 addr)
{
	return NULL;
}

static inline void xsk_buff_dma_sync_for_cpu(struct xdp_buff *xdp)
{
}

static inline void xsk_buff_raw_dma_sync_for_device(struct xsk_buff_pool *pool,
						    dma_addr_t dma,
						    size_t size)
{
}

#endif /* CONFIG_XDP_SOCKETS */

#endif /* _LINUX_XDP_SOCK_DRV_H */